Edited to add clarity
I am looking to create an HTML5 video playback that triggers events at specific, regularly timed cuepoints. For example, I'd like an event to fire every second during video playback that checks the contents of a textbox (i.e. what the textbox contained at second 1, what it contained at second 2, and so on). The tricky part is that I need it to work across all major platforms/browsers, and that includes iPhones and iPads.
iPhones in particular seem to be a problem: no matter the player, the setting, or the hack I've tried, when a video starts playing, the browser goes to the background and the video is played in a full-screen container (QuickTime?). When the video stops playing and control is back with the browser, I can see that the cuepoint events fired, but that's of no use if the textbox is unreachable during video playback!
I am very familiar with Flowplayer and have already done a bunch of work to ensure it works for playback across most relevant platforms; the cuepoint feature of its API seems to be exactly what we need, BUT there's a warning/restriction specific to it:
Be aware that cuepoints are subject to device restrictions regarding the HTML5 video API. On devices which do not support inline video because they delegate playback to a system component (e.g. QuickTime on iPhone) the effect of cuepoints is next to none in real world setups.
Has anyone worked with Flowplayer cuepoints or alternate tech on iPhones/iPads? Obviously, if I can maintain one code base, that would be preferable to having multiple platform-specific versions.
Here is a simple controller for the video element which tracks the timeupdate event to trigger callback functions at the specified timecodes.
It allows you to attach multiple callbacks for the same timecode.
The timeupdate event is fired at different rates on different devices. Because of this, there is a limitation: cuepoints can only be attached at integer values (so at 5 seconds, not 5.5). You can potentially remove this restriction, but then there is a risk that some cuepoints will never be triggered.
/**
 * Function which allows you to register cuepoints to trigger callbacks
 * at specific timecodes
 * @param {HTMLVideoElement} video_el The video element you want to track
 */
var VideoController = function(video_el){
    /**
     * The video element this controller is for
     * @type {HTMLVideoElement}
     */
    this.element = video_el;
    /**
     * Object containing all the cuepoints
     * @type {Object}
     */
    this.cuepoints = {};
    /**
     * The last processed timecode, so we don't fire events more than once
     * @type {Number}
     */
    this.processed_timecode = undefined;
    /**
     * Allows you to trigger a callback at a specific timecode
     * @param {Number} timecode The timecode at which you want to trigger your callback
     * @param {Function} callback Your callback function
     */
    this.addCuepoint = function(timecode, callback){
        // Cuepoints can only be registered at whole-second resolution
        timecode = Math.floor(timecode);
        // Create the callback array for this timecode if it does not exist yet
        if(this.cuepoints[timecode] === undefined){
            this.cuepoints[timecode] = [];
        }
        this.cuepoints[timecode].push(callback);
        return this;
    }
    /**
     * Internal method to track the video's current timecode and to trigger
     * the cuepoints when necessary
     * @param {Event} e A timeupdate event from the video
     */
    this.timeupdate = function(e){
        var timecode = Math.floor(e.target.currentTime);
        // Check to see if there is a callback registered for this timecode
        if(this.cuepoints.hasOwnProperty(timecode) && this.cuepoints[timecode] !== undefined && this.processed_timecode !== timecode){
            // If there is, loop through the array of callbacks and trigger them
            for(var i = 0, l = this.cuepoints[timecode].length; i < l; i++){
                this.cuepoints[timecode][i]();
            }
        }
        // Update the processed_timecode so we do not fire these callbacks again
        this.processed_timecode = timecode;
    }.bind(this);
    // Listen for timeupdate on the video element to track the video timecode
    this.element.addEventListener('timeupdate', this.timeupdate);
    return this;
}
var video = document.getElementById('myVideoElement');
var video_controller = new VideoController(video);
video_controller.addCuepoint(2, function(){
    console.log('do something at 2 seconds');
});
video_controller.addCuepoint(2, function(){
    console.log('do something else at 2 seconds');
});
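A side note on cleanup (not part of the original answer): because the bound timeupdate handler is exposed on the controller instance, it can be detached with the standard removeEventListener call if you no longer need the cuepoints.
// Detach the controller's handler when it is no longer needed.
video.removeEventListener('timeupdate', video_controller.timeupdate);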
I've adapted @Irfan's answer above to use requestAnimationFrame instead of relying on the timeupdate event.
This allows for more granular triggering of events, e.g. on the half second; just adjust the .toFixed(1) precision as appropriate.
This will likely not help you with the iPhone's web video restrictions, but for other use cases it should be helpful to anyone who needs precise triggering of events based on video time.
// Request Animation Frame Shim
// Source: https://www.paulirish.com/2011/requestanimationframe-for-smart-animating/
window.requestAnimFrame = (function(){
    return window.requestAnimationFrame ||
        window.webkitRequestAnimationFrame ||
        window.mozRequestAnimationFrame ||
        function( callback ){
            window.setTimeout(callback, 1000 / 60);
        };
})();
var VideoController = function(video_el){
    this.element = video_el;
    this.cuepoints = {};
    this.processed_timecode = undefined;
    var self = this;
    this.addCuepoint = function(timecode, callback){
        // toFixed(1) keys cuepoints to tenths of a second (note: it returns a string)
        var key = timecode.toFixed(1);
        if(this.cuepoints[key] === undefined){
            this.cuepoints[key] = [];
        }
        this.cuepoints[key].push(callback);
        return this;
    }
    this.rafUpdate = function(timestamp){
        var timecode = video_el.currentTime.toFixed(1);
        // Keep polling on every animation frame until the video has played to its end
        if(!video_el.duration){
            requestAnimFrame(self.rafUpdate);
        }else if( timecode < video_el.duration ){
            requestAnimFrame(self.rafUpdate);
        }
        if( self.cuepoints.hasOwnProperty(timecode) && self.cuepoints[timecode] !== undefined && self.processed_timecode !== timecode ){
            for(var i = 0, l = self.cuepoints[timecode].length; i < l; i++){
                self.cuepoints[timecode][i]();
            }
        }
        // Remember the timecode we just handled so callbacks only fire once
        self.processed_timecode = timecode;
    }
    requestAnimFrame( this.rafUpdate ); // keeps better time than video.timeupdate
    return this;
}
var video = document.getElementById('myvideo');
globalVars['video'] = video;
var video_controller = new VideoController(globalVars['video']);
video_controller.addCuepoint(10.2, function(){
    endframe.play();
});
I am trying to build a MIDI player using the Web Audio API. I used Tone.js to parse a MIDI file into JSON. I am using mp3 files to play notes. The following are the relevant parts of the code:
// Create audio samples
static async setupSample(audioContext, filepath) {
    const response = await fetch(filepath);
    const arrayBuffer = await response.arrayBuffer();
    const audioBuffer = await audioContext.decodeAudioData(arrayBuffer);
    return audioBuffer;
}
// Play a single sample
static playSample(audioContext, audioBuffer, time) {
    const sampleSource = new AudioBufferSourceNode(audioContext, {
        buffer: audioBuffer,
        playbackRate: 1,
    });
    sampleSource.connect(audioContext.destination);
    sampleSource.start(time);
    return sampleSource;
}
Scheduling samples:
async start() {
    this.startTime = this.audioCtx.currentTime;
    this.play();
}

play() {
    let nextNote = this.notes[this.noteIndex];
    // Schedule all samples that start within the next 250 ms
    while ((nextNote.time + this.startTime) - this.audioCtx.currentTime <= 0.250) {
        let s = Audio.playSample(this.audioCtx, this.samples[nextNote.midi], this.startTime + nextNote.time);
        s.stop(this.startTime + nextNote.time + nextNote.duration);
        this.noteIndex++;
        if (this.noteIndex == this.notes.length) {
            break;
        }
        nextNote = this.notes[this.noteIndex];
    }
    if (this.noteIndex == this.notes.length) {
        return;
    }
    requestAnimationFrame(() => {
        this.play();
    });
}
I am testing the code with a MIDI file which contains a C major scale. I have tested the MIDI file using timidity and it is fine.
The code plays the MIDI file correctly except for one small problem: I hear clicking sounds during playback. The clicking increases with increasing tempo, but does not completely go away even with a tempo as low as 50 bpm. Any ideas what could be going wrong?
The full code can be viewed at: https://test.meedee.in/
Nothing is "wrong". You are observing a phenomenon intrinsic to the physics of audio.
Chopping audio samples arbitrarily like this creates clicks at the transitions. Any instantaneous change in level is heard as a click. To get rid of the clicks, apply an envelope to the sample, blend adjacent notes, or apply a low-pass filter.
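As a rough illustration of the envelope approach, here is a sketch (not the asker's original code; the 5 ms fade length and the extra duration parameter are assumptions) that routes each sample through a GainNode and ramps the gain at both ends instead of cutting the audio off at full level:
static playSampleSmooth(audioContext, audioBuffer, time, duration) {
    const FADE = 0.005; // assumed 5 ms fade; tune by ear
    const sampleSource = new AudioBufferSourceNode(audioContext, { buffer: audioBuffer });
    const gainNode = new GainNode(audioContext, { gain: 0 });
    sampleSource.connect(gainNode);
    gainNode.connect(audioContext.destination);
    // Ramp up from silence at the start of the note...
    gainNode.gain.setValueAtTime(0, time);
    gainNode.gain.linearRampToValueAtTime(1, time + FADE);
    // ...and back down to silence just before the note is stopped.
    gainNode.gain.setValueAtTime(1, time + duration - FADE);
    gainNode.gain.linearRampToValueAtTime(0, time + duration);
    sampleSource.start(time);
    sampleSource.stop(time + duration);
    return sampleSource;
}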
I have created an audio worklet that performs pitch detection. All works fine, but I want to free the microphone once I am done.
I get the stream and wire everything up like this:
const AudioContextConstructor =
window.AudioContext || window.webkitAudioContext;
this.audioContext = new AudioContextConstructor();
await this.audioContext.audioWorklet.addModule('js/worklet_pitcher.js');
this.stream = await navigator.mediaDevices.getUserMedia({ audio: true });
var mediaStreamSource = this.audioContext.createMediaStreamSource(this.stream);
this.pitchWorklet = new AudioWorkletNode(this.audioContext, 'pitch-processor');
mediaStreamSource.connect(this.pitchWorklet);
When I am done I simply do this:
stop = (): void => {
    if (this.running) {
        this.audioContext.close();
        this.running = false;
    }
}
This stops the worklet pipeline, but the red dot still shows in the browser tab, meaning that I still hold the mic.
I looked for a stream.close so I could explicitly close the MediaStream returned by getUserMedia, but there isn't one.
You also need to call stop() on each MediaStreamTrack of the MediaStream obtained from the mic.
this.stream.getTracks().forEach((track) => track.stop());
https://developer.mozilla.org/en-US/docs/Web/API/MediaStreamTrack/stop
https://developer.mozilla.org/en-US/docs/Web/API/MediaStream/getTracks
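Put together with the stop method from the question, a version that releases the mic might look like this (a sketch based on the question's own code):
stop = (): void => {
    if (this.running) {
        // Stop every track of the captured stream so the browser
        // releases the microphone (and removes the red recording dot).
        this.stream.getTracks().forEach((track) => track.stop());
        // Then tear down the audio graph.
        this.audioContext.close();
        this.running = false;
    }
}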
I'm loading an MP3 on my webpage using audio = new Audio(). I'd like to know when the audio can skip to any point in the file (near the end or wherever) via audio.currentTime without any delay in playback, i.e. I want to know when the MP3 has downloaded in its entirety.
Can I use the Audio object/element for this, or must I use an AudioContext as shown here?
Every AudioElement exposes its buffered data as a TimeRanges object. TimeRanges is an object which tells you how many continuous parts, aka ranges, are already buffered. It also has getters which return the respective start and end of each range in seconds.
If your AudioElement is named audio, the following code snippet will log the buffered time ranges at a given point in time.
const numberOfRanges = audio.buffered.length;
for (let i = 0; i < numberOfRanges; i += 1) {
    console.log(
        audio.buffered.start(i),
        audio.buffered.end(i)
    );
}
If you want to detect the point in time at which all data is buffered you could use a check similar to this one:
const isBufferedCompletely = (audio.buffered.length === 1
    && audio.buffered.start(0) === 0
    && audio.buffered.end(0) === audio.duration);
I used the Gist referenced in the comments below to construct an example. The following snippet will periodically check if the file is already buffered. It will log a message to the console once that is the case. I tested it on Chrome (v74) and Firefox (v66) on OS X. Please note that the file can't be played at the same time as the script will set the currentTime of the Audio Element.
const audio = new Audio('http://www.obamadownloads.com/mp3s/charleston-eulogy-speech.mp3');
audio.preload = 'auto';

function detectBuffered(duration) {
    // Stick with the duration once it is known because it might get updated
    // when reaching the end of the file.
    if (duration === undefined && !isNaN(audio.duration)) {
        duration = audio.duration;
    }
    const isBufferedCompletely = (audio.buffered.length === 1
        && audio.buffered.start(0) === 0
        && audio.buffered.end(0) === duration);
    if (isBufferedCompletely) {
        const seconds = Math.round(duration);
        console.log('The complete file is buffered.');
        console.log(`It is about ${ seconds } seconds long.`);
    } else {
        // Move the playhead of the audio element to get the browser to load
        // the complete file.
        if (audio.buffered.length > 0) {
            audio.currentTime = Math.max(0, audio.buffered.end(0) - 1);
        }
        setTimeout(detectBuffered, 100, duration);
    }
}

detectBuffered();
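If you prefer not to poll with setTimeout, the media element also fires a progress event whenever a new chunk has been buffered. Here is a sketch of the same completeness check driven by that event; note that without the currentTime nudging from the snippet above, the browser may stop downloading before the end, so the two techniques work best combined:
audio.addEventListener('progress', () => {
    // Re-run the completeness check whenever new data arrives.
    const done = audio.buffered.length === 1
        && audio.buffered.start(0) === 0
        && audio.buffered.end(0) === audio.duration;
    if (done) {
        console.log('The complete file is buffered.');
    }
});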
My Codename One app features audio playback in the background when the user taps the screen. The audio I use is an mp3. Here is how I use the Media playback:
public static void playSound(boolean stop) {
    sound.reset(); // The input stream needs to go back to the beginning
    Media myClip = MediaManager.createMedia(sound, "audio/mp3", () -> {
        // If there is no order to stop playback, we keep playing when it has completed (looping)
        playSound(false);
    });
    if (!stop) {
        myClip.play();
    } else {
        myClip.cleanup();
    }
}
So when the user taps the screen, components change and I pass true to the playSound method. On Android the current playback stops, but not on iOS (tested with an iPhone 4).
Please note that when the app gets minimized (center button pressed) the playback stops, even if I don't call cleanup() on the Media (which I do on Android to stop the playback when the app is minimized).
How can I stop the playback on iPhone?
Any help appreciated.
@Shai pointed me in the right direction, so here is the code finally used:
Media myClip = null;

public static void playSound(boolean stop) {
    sound.reset(); // The input stream needs to go back to the beginning
    /**
     * If the media is playing we don't create it,
     * otherwise we would have several media in the wild
     * that could not be stopped
     */
    if (myClip == null || !myClip.isPlaying()) {
        myClip = MediaManager.createMedia(sound, "audio/mp3", () -> {
            // If there is no order to stop playback, we keep playing when it has completed (looping)
            playSound(false);
        });
    }
    if (!stop) {
        myClip.play();
    } else {
        myClip.cleanup();
    }
}
Mostly for security reasons, I'm not allowed to store a WAV file on the server to be accessed by a browser. What I have on the server is a byte array containing audio data (the data portion of a WAV file, I believe), and I want it to be played in a browser through JavaScript (or an applet, but JS is preferred). I can use JSON-RPC to send the whole byte[] over, or I can open a socket to stream it over, but in either case I don't know how to play the byte[] within the browser.
The following code will play a sine wave at 0.5 and 2.0 seconds. Call the function play_buffersource() in your button handler or anywhere you want.
Tested using Chrome with the Web Audio flag enabled. For your case, all you need to do is copy your audio bytes into buf.
<script type="text/javascript">
    const kSampleRate = 44100; // Other sample rates might not work depending on your browser's AudioContext
    const kNumSamples = 16834;
    const kFrequency = 440;
    const kPI_2 = Math.PI * 2;

    function play_buffersource() {
        if (!window.AudioContext) {
            if (!window.webkitAudioContext) {
                alert("Your browser sucks because it does NOT support any AudioContext!");
                return;
            }
            window.AudioContext = window.webkitAudioContext;
        }
        var ctx = new AudioContext();
        var buffer = ctx.createBuffer(1, kNumSamples, kSampleRate);
        var buf = buffer.getChannelData(0);
        for (var i = 0; i < kNumSamples; ++i) {
            buf[i] = Math.sin(kFrequency * kPI_2 * i / kSampleRate);
        }
        var node = ctx.createBufferSource();
        node.buffer = buffer;
        node.connect(ctx.destination);
        node.start(ctx.currentTime + 0.5); // start() replaces noteOn() from early Web Audio drafts
        node = ctx.createBufferSource();
        node.buffer = buffer;
        node.connect(ctx.destination);
        node.start(ctx.currentTime + 2.0);
    }
</script>
References:
http://epx.com.br/artigos/audioapi.php
https://dvcs.w3.org/hg/audio/raw-file/tip/webaudio/specification.html
If you need to resample the audio, you can use a JavaScript resampler: https://github.com/grantgalitz/XAudioJS
If you need to decode base64 data, there are a lot of JavaScript base64 decoders: https://github.com/carlo/jquery-base64
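Modern browsers can also do the base64 decoding without a library; here is a minimal sketch using the built-in atob function (the helper name base64ToBytes is mine):
// Turn a base64 string into a Uint8Array of audio bytes.
function base64ToBytes(base64) {
    var binary = atob(base64);
    var bytes = new Uint8Array(binary.length);
    for (var i = 0; i < binary.length; i++) {
        bytes[i] = binary.charCodeAt(i);
    }
    return bytes;
}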
I accomplished this via the following code. I pass a byte array containing the data from the WAV file to the function playByteArray. My solution is similar to Peter Lee's, but I could not get his to work in my case (the output was garbled), whereas this solution works well for me. I verified that it works in Firefox and Chrome.
window.onload = init;
var context; // Audio context
var buf;     // Audio buffer

function init() {
    if (!window.AudioContext) {
        if (!window.webkitAudioContext) {
            alert("Your browser does not support any AudioContext and cannot play back this audio.");
            return;
        }
        window.AudioContext = window.webkitAudioContext;
    }
    context = new AudioContext();
}

function playByteArray(byteArray) {
    var arrayBuffer = new ArrayBuffer(byteArray.length);
    var bufferView = new Uint8Array(arrayBuffer);
    for (var i = 0; i < byteArray.length; i++) {
        bufferView[i] = byteArray[i];
    }
    context.decodeAudioData(arrayBuffer, function(buffer) {
        buf = buffer;
        play();
    });
}

// Play the loaded file
function play() {
    // Create a source node from the buffer
    var source = context.createBufferSource();
    source.buffer = buf;
    // Connect to the final output node (the speakers)
    source.connect(context.destination);
    // Play immediately
    source.start(0);
}
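For completeness, here is one way the byte array might be fetched and handed to playByteArray; the /audio-bytes URL is a made-up placeholder for whatever your JSON-RPC or socket transport actually delivers:
var request = new XMLHttpRequest();
request.open('GET', '/audio-bytes', true); // hypothetical endpoint
request.responseType = 'arraybuffer';
request.onload = function() {
    // decodeAudioData expects a complete audio file, headers included
    playByteArray(new Uint8Array(request.response));
};
request.send();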
If you have the bytes on the server then I would suggest that you create some kind of handler on the server that will stream the bytes to the response as a wav file. This "file" would only be in memory on the server and not on disk. Then the browser can just handle it like a normal wav file.
More details on the server stack would be needed to give more information on how this could be done in your environment.
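As an illustration, assuming (purely for this sketch) a Node.js server and an in-memory Buffer named wavBytes holding a complete WAV file:
// Serve an in-memory WAV buffer as if it were a static file.
const http = require('http');
// wavBytes: a Buffer assembled elsewhere in memory (assumed; never written to disk)
http.createServer((req, res) => {
    if (req.url === '/clip.wav') {
        res.writeHead(200, {
            'Content-Type': 'audio/wav',
            'Content-Length': wavBytes.length,
        });
        res.end(wavBytes);
    } else {
        res.writeHead(404);
        res.end();
    }
}).listen(8080);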
I suspect you can achieve this easily enough with the HTML5 Audio API:
https://developer.mozilla.org/en/Introducing_the_Audio_API_Extension
This library might come in handy too, though I'm not sure if it reflects the latest browser behaviours:
https://github.com/jussi-kalliokoski/audiolib.js