Request microphone access on a click event

The other day, I came across this example of JavaScript audio recording:

http://webaudiodemos.appspot.com/AudioRecorder/index.html

I used it to implement my own recorder. The problem I am facing is with this file:

var audioContext = new webkitAudioContext();
var audioInput = null,
    realAudioInput = null,
    inputPoint = null,
    audioRecorder = null;
var rafID = null;
var analyserContext = null;
var canvasWidth, canvasHeight;
var recIndex = 0;

/* TODO:
   - offer mono option
   - "Monitor input" switch
*/

function saveAudio() {
    audioRecorder.exportWAV( doneEncoding );
}

function drawWave( buffers ) {
    var canvas = document.getElementById( "wavedisplay" );
    drawBuffer( canvas.width, canvas.height, canvas.getContext('2d'), buffers[0] );
}

function doneEncoding( blob ) {
    Recorder.forceDownload( blob, "myRecording" + ((recIndex<10)?"0":"") + recIndex + ".wav" );
    recIndex++;
}

function toggleRecording( e ) {
    if (e.classList.contains("recording")) {
        // stop recording
        audioRecorder.stop();
        e.classList.remove("recording");
        audioRecorder.getBuffers( drawWave );
    } else {
        // start recording
        if (!audioRecorder)
            return;
        e.classList.add("recording");
        audioRecorder.clear();
        audioRecorder.record();
    }
}

// this is a helper function to force mono for some interfaces that return a stereo channel for a mono source.
// it not currently used, but probably will be in the future.
function convertToMono( input ) {
    var splitter = audioContext.createChannelSplitter(2);
    var merger = audioContext.createChannelMerger(2);

    input.connect( splitter );
    splitter.connect( merger, 0, 0 );
    splitter.connect( merger, 0, 1 );
    return merger;
}

function toggleMono() {
    if (audioInput != realAudioInput) {
        audioInput.disconnect();
        realAudioInput.disconnect();
        audioInput = realAudioInput;
    } else {
        realAudioInput.disconnect();
        audioInput = convertToMono( realAudioInput );
    }
    audioInput.connect(inputPoint);
}

function cancelAnalyserUpdates() {
    window.webkitCancelAnimationFrame( rafID );
    rafID = null;
}

function updateAnalysers(time) {
    if (!analyserContext) {
        var canvas = document.getElementById("analyser");
        canvasWidth = canvas.width;
        canvasHeight = canvas.height;
        analyserContext = canvas.getContext('2d');
    }

    // analyzer draw code here
    {
        var SPACING = 3;
        var BAR_WIDTH = 1;
        var numBars = Math.round(canvasWidth / SPACING);
        var freqByteData = new Uint8Array(analyserNode.frequencyBinCount);

        analyserNode.getByteFrequencyData(freqByteData);

        analyserContext.clearRect(0, 0, canvasWidth, canvasHeight);
        analyserContext.fillStyle = '#F6D565';
        analyserContext.lineCap = 'round';
        var multiplier = analyserNode.frequencyBinCount / numBars;

        // Draw rectangle for each frequency bin.
        for (var i = 0; i < numBars; ++i) {
            var magnitude = 0;
            var offset = Math.floor( i * multiplier );
            // gotta sum/average the block, or we miss narrow-bandwidth spikes
            for (var j = 0; j < multiplier; j++)
                magnitude += freqByteData[offset + j];
            magnitude = magnitude / multiplier;
            var magnitude2 = freqByteData[i * multiplier];
            analyserContext.fillStyle = "hsl( " + Math.round((i*360)/numBars) + ", 100%, 50%)";
            analyserContext.fillRect(i * SPACING, canvasHeight, BAR_WIDTH, -magnitude);
        }
    }

    rafID = window.webkitRequestAnimationFrame( updateAnalysers );
}

function gotStream(stream) {
    // "inputPoint" is the node to connect your output recording to.
    inputPoint = audioContext.createGainNode();

    // Create an AudioNode from the stream.
    realAudioInput = audioContext.createMediaStreamSource(stream);
    audioInput = realAudioInput;
    audioInput.connect(inputPoint);

    // audioInput = convertToMono( input );

    analyserNode = audioContext.createAnalyser();
    analyserNode.fftSize = 2048;
    inputPoint.connect( analyserNode );

    audioRecorder = new Recorder( inputPoint );

    zeroGain = audioContext.createGainNode();
    zeroGain.gain.value = 0.0;
    inputPoint.connect( zeroGain );
    zeroGain.connect( audioContext.destination );
    updateAnalysers();
}

function initAudio() {
    if (!navigator.webkitGetUserMedia)
        return(alert("Error: getUserMedia not supported!"));
    navigator.webkitGetUserMedia({audio:true}, gotStream, function(e) {
        alert('Error getting audio');
        console.log(e);
    });
}

window.addEventListener('load', initAudio );

As you can see, the initAudio() function (which asks the user for permission to use their microphone) is called as soon as the page loads (see the last line), like this:

 window.addEventListener('load', initAudio ); 

Now I have this code in HTML:

<script type="text/javascript">
  $(function() {
    $("#recbutton").on("click", function() {
      $("#entrance").hide();
      $("#live").fadeIn("slow");
      toggleRecording(this);
      $(this).toggle();
      return $("#stopbutton").toggle();
    });
    return $("#stopbutton").on("click", function() {
      audioRecorder.stop();
      $(this).toggle();
      $("#recbutton").toggle();
      $("#live").hide();
      return $("#entrance").fadeIn("slow");
    });
  });
</script>

As you can see, I call toggleRecording(this) (the function that starts the recording process) only after #recbutton is pressed. Everything works fine with this code, BUT the user gets the microphone permission prompt as soon as the page loads, and I want to ask for permission to use the microphone ONLY AFTER they click #recbutton. I figured that if I deleted the last line of the first file:

 window.addEventListener('load', initAudio ); 

and change my inline script as follows:

<script type="text/javascript">
  $(function() {
    $("#recbutton").on("click", function() {
      $("#entrance").hide();
      $("#live").fadeIn("slow");
      initAudio();
      toggleRecording(this);
      $(this).toggle();
      return $("#stopbutton").toggle();
    });
    return $("#stopbutton").on("click", function() {
      audioRecorder.stop();
      $(this).toggle();
      $("#recbutton").toggle();
      $("#live").hide();
      return $("#entrance").fadeIn("slow");
    });
  });
</script>

I could achieve what I wanted: the user is not prompted for the microphone until the #recbutton button is pressed. The problem is that no sound is ever recorded, and when you try to download it the resulting WAV file is empty.

How can I fix this?

My project code: https://github.com/Jmlevick/html-recorder

2 answers

I found an elegant and simple solution for this (or at least it seems so to me):

What I did was load "main.js" and "recorder.js" with a getScript call that runs only when a certain button (#button1) is clicked by the user. These scripts are not loaded by the web page itself until that click, but we need a couple more tricks to make them work the way I described and wanted above:

In main.js, I changed:

 window.addEventListener('load', initAudio ); 

to:

 window.addEventListener('click', initAudio ); 

So when the scripts are loaded onto the page with getScript, "main.js" now listens for a click event on the page to ask the user for the microphone. Then I needed a hidden button (#button2) on the page that jQuery clicks programmatically right after the scripts are loaded, so the click fires the "ask for microphone permission" prompt; and just below the line of code that generates the fake click, I added:

 window.removeEventListener("click", initAudio, false); 

So the workflow for this trick ends up like this:

  • The user presses a button that loads the necessary js files onto the page with getScript; note that main.js now listens for a click event on the window instead of the load event.

  • A hidden button is "fake clicked" by jQuery the moment the first one is clicked, which fires the microphone permission prompt for the user.

  • Once that event has fired, the click listener is removed from the window, so the "ask for permission" prompt is never triggered again when the user clicks elsewhere on the page.

And that's basically all! :) Now, when the user visits the page, they are never asked for microphone permission until they click the "Rec" button, just as I wanted. With one click by the user we do three things in jQuery, but to the user it looks as if nothing extra happened: the microphone permission prompt simply appears on screen right after pressing the "Rec" button.
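A minimal sketch of how this can be wired up (the script paths and the #button1/#button2 ids are just the placeholder names I used above, not code from the demo):

// #button1 is visible; #button2 is hidden (e.g. style="display:none").
// main.js and recorder.js are NOT included in the page markup at all.
$("#button1").on("click", function () {
    // Load the recorder scripts only now, on the first user click.
    $.getScript("js/recorder.js", function () {
        $.getScript("js/main.js", function () {
            // main.js has just registered: window.addEventListener('click', initAudio);
            // A fake click on the hidden button bubbles up to the window,
            // so initAudio() runs and the microphone prompt appears.
            $("#button2").click();
            // Remove the listener so clicks elsewhere never re-trigger the prompt.
            window.removeEventListener("click", initAudio, false);
        });
    });
});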


No, your problem is that getUserMedia() has an asynchronous success callback (gotStream()); you need to put the rest of the logic from your record-button handler (specifically the toggleRecording bit) inside that callback, because right now it runs before getUserMedia returns (and sets up the audio nodes).
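For example, a rough sketch of one way to restructure it (the onReady callback parameter is an addition of mine, not part of the demo code):

// Let initAudio take a callback and fire it only after gotStream()
// has created the audio nodes and the Recorder instance.
function initAudio(onReady) {
    if (!navigator.webkitGetUserMedia)
        return alert("Error: getUserMedia not supported!");
    navigator.webkitGetUserMedia({ audio: true }, function (stream) {
        gotStream(stream);          // sets up inputPoint, analyserNode, audioRecorder
        if (onReady) onReady();     // only now is it safe to start recording
    }, function (e) {
        alert('Error getting audio');
        console.log(e);
    });
}

// In the click handler, start recording from inside the callback:
$("#recbutton").on("click", function () {
    var button = this;
    $("#entrance").hide();
    $("#live").fadeIn("slow");
    initAudio(function () {
        toggleRecording(button);    // audioRecorder exists by this point
    });
    $(button).toggle();
    $("#stopbutton").toggle();
});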


Source: https://habr.com/ru/post/1486378/

