The other day, I came across this example of JavaScript audio recording:
http://webaudiodemos.appspot.com/AudioRecorder/index.html
which I used to implement my own recorder. The problem I am facing is in this file:
var audioContext = new webkitAudioContext();
var audioInput = null,
    realAudioInput = null,
    inputPoint = null,
    audioRecorder = null;
var rafID = null;
var analyserContext = null;
var canvasWidth, canvasHeight;
var recIndex = 0;

/* TODO:
- offer mono option
- "Monitor input" switch
*/

function saveAudio() {
    audioRecorder.exportWAV( doneEncoding );
}

function drawWave( buffers ) {
    var canvas = document.getElementById( "wavedisplay" );
    drawBuffer( canvas.width, canvas.height, canvas.getContext('2d'), buffers[0] );
}

function doneEncoding( blob ) {
    Recorder.forceDownload( blob, "myRecording" + ((recIndex<10)?"0":"") + recIndex + ".wav" );
    recIndex++;
}

function toggleRecording( e ) {
    if (e.classList.contains("recording")) {
        // stop recording
        audioRecorder.stop();
        e.classList.remove("recording");
        audioRecorder.getBuffers( drawWave );
    } else {
        // start recording
        if (!audioRecorder)
            return;
        e.classList.add("recording");
        audioRecorder.clear();
        audioRecorder.record();
    }
}

// this is a helper function to force mono for some interfaces that return
// a stereo channel for a mono source.
// it not currently used, but probably will be in the future.
function convertToMono( input ) {
    var splitter = audioContext.createChannelSplitter(2);
    var merger = audioContext.createChannelMerger(2);

    input.connect( splitter );
    splitter.connect( merger, 0, 0 );
    splitter.connect( merger, 0, 1 );
    return merger;
}

function toggleMono() {
    if (audioInput != realAudioInput) {
        audioInput.disconnect();
        realAudioInput.disconnect();
        audioInput = realAudioInput;
    } else {
        realAudioInput.disconnect();
        audioInput = convertToMono( realAudioInput );
    }
    audioInput.connect(inputPoint);
}

function cancelAnalyserUpdates() {
    window.webkitCancelAnimationFrame( rafID );
    rafID = null;
}

function updateAnalysers(time) {
    if (!analyserContext) {
        var canvas = document.getElementById("analyser");
        canvasWidth = canvas.width;
        canvasHeight = canvas.height;
        analyserContext = canvas.getContext('2d');
    }

    // analyzer draw code here
    {
        var SPACING = 3;
        var BAR_WIDTH = 1;
        var numBars = Math.round(canvasWidth / SPACING);
        var freqByteData = new Uint8Array(analyserNode.frequencyBinCount);

        analyserNode.getByteFrequencyData(freqByteData);

        analyserContext.clearRect(0, 0, canvasWidth, canvasHeight);
        analyserContext.fillStyle = '#F6D565';
        analyserContext.lineCap = 'round';
        var multiplier = analyserNode.frequencyBinCount / numBars;

        // Draw rectangle for each frequency bin.
        for (var i = 0; i < numBars; ++i) {
            var magnitude = 0;
            var offset = Math.floor( i * multiplier );
            // gotta sum/average the block, or we miss narrow-bandwidth spikes
            for (var j = 0; j < multiplier; j++)
                magnitude += freqByteData[offset + j];
            magnitude = magnitude / multiplier;
            var magnitude2 = freqByteData[i * multiplier];
            analyserContext.fillStyle = "hsl( " + Math.round((i*360)/numBars) + ", 100%, 50%)";
            analyserContext.fillRect(i * SPACING, canvasHeight, BAR_WIDTH, -magnitude);
        }
    }

    rafID = window.webkitRequestAnimationFrame( updateAnalysers );
}

function gotStream(stream) {
    // "inputPoint" is the node to connect your output recording to.
    inputPoint = audioContext.createGainNode();

    // Create an AudioNode from the stream.
    realAudioInput = audioContext.createMediaStreamSource(stream);
    audioInput = realAudioInput;
    audioInput.connect(inputPoint);

    // audioInput = convertToMono( input );

    analyserNode = audioContext.createAnalyser();
    analyserNode.fftSize = 2048;
    inputPoint.connect( analyserNode );

    audioRecorder = new Recorder( inputPoint );

    zeroGain = audioContext.createGainNode();
    zeroGain.gain.value = 0.0;
    inputPoint.connect( zeroGain );
    zeroGain.connect( audioContext.destination );
    updateAnalysers();
}

function initAudio() {
    if (!navigator.webkitGetUserMedia)
        return(alert("Error: getUserMedia not supported!"));
    navigator.webkitGetUserMedia({audio:true}, gotStream, function(e) {
        alert('Error getting audio');
        console.log(e);
    });
}

window.addEventListener('load', initAudio );
As you can see, the initAudio() function (the one that asks the user for permission to use the microphone) is called as soon as the page loads (see the last line), via:
window.addEventListener('load', initAudio );
Now I have this code in HTML:
<script type="text/javascript" > $(function() { $("#recbutton").on("click", function() { $("#entrance").hide(); $("#live").fadeIn("slow"); toggleRecording(this); $(this).toggle(); return $("#stopbutton").toggle(); }); return $("#stopbutton").on("click", function() { audioRecorder.stop(); $(this).toggle(); $("#recbutton").toggle(); $("#live").hide(); return $("#entrance").fadeIn("slow"); }); }); </script>
And as you can see, I call the toggleRecording(this) function (the one that starts the recording process) only after the #recbutton button is pressed. Everything works fine with this code, BUT the user gets the microphone permission prompt as soon as the page loads, and I want to ask for permission ONLY AFTER they click the #recbutton button. Do you see what I mean? I figured that if I deleted the last line of the first file:
window.addEventListener('load', initAudio );
and changed my inline script as follows:
<script type="text/javascript" > $(function() { $("#recbutton").on("click", function() { $("#entrance").hide(); $("#live").fadeIn("slow"); initAudio(); toggleRecording(this); $(this).toggle(); return $("#stopbutton").toggle(); }); return $("#stopbutton").on("click", function() { audioRecorder.stop(); $(this).toggle(); $("#recbutton").toggle(); $("#live").hide(); return $("#entrance").fadeIn("slow"); }); }); </script>
I could achieve what I wanted: the user is not prompted for the microphone until they press the #recbutton button. The problem is that no sound ever gets recorded; when I try to download it, the resulting WAV file is empty.
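My guess is that this happens because getUserMedia is asynchronous: when my click handler calls toggleRecording(this), the gotStream callback has not fired yet, so audioRecorder is still null and toggleRecording just returns without recording anything. Below is a rough, untested sketch of what I have in mind; the onReady callback parameter is my own addition and is not part of the demo code:

function initAudio( onReady ) {
    if (!navigator.webkitGetUserMedia)
        return(alert("Error: getUserMedia not supported!"));
    navigator.webkitGetUserMedia({audio:true}, function(stream) {
        gotStream(stream);        // sets up inputPoint and audioRecorder
        if (onReady) onReady();   // only now is it safe to start recording
    }, function(e) {
        alert('Error getting audio');
        console.log(e);
    });
}

// and in the click handler:
$("#recbutton").on("click", function() {
    var button = this;
    initAudio(function() {
        toggleRecording(button);  // audioRecorder exists at this point
    });
});

I am not sure whether this is the right approach, though.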
How can I fix this?
My project code: https://github.com/Jmlevick/html-recorder