
How do I get audio data from the microphone using AudioContext (HTML5)?


I am trying to get a stream of data from my microphone (e.g. volume, pitch). So far I have been using getUserMedia to access my microphone audio, but I cannot find a way to extract the data from it.

My code:

$(function () {
  var audioContext = new AudioContext();
  var audioInput = null,
      realAudioInput = null,
      inputPoint = null,
      analyserNode = null;

  // shim for the prefixed getUserMedia implementations
  if (!navigator.getUserMedia)
      navigator.getUserMedia = navigator.webkitGetUserMedia ||
                               navigator.mozGetUserMedia || navigator.msGetUserMedia;

  if (navigator.getUserMedia) {
      navigator.getUserMedia({audio: true}, success, function (e) {
          alert('Error capturing audio.');
      });
  } else {
      alert('getUserMedia not supported in this browser.');
  }

  function success(stream) {
      inputPoint = audioContext.createGain();
      realAudioInput = audioContext.createMediaStreamSource(stream);
      audioInput = realAudioInput;
      audioInput.connect(inputPoint);
      analyserNode = audioContext.createAnalyser();
      analyserNode.fftSize = 2048;
      inputPoint.connect(analyserNode);
      live(); // start polling the analyser once the graph is connected
  }

  function live() {
      requestAnimationFrame(live);
      var freqByteData = new Uint8Array(analyserNode.frequencyBinCount);
      analyserNode.getByteFrequencyData(freqByteData);
      console.log(freqByteData); // log the data itself, not just the bin count
  }
});
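
For the volume part of the question specifically, one option is to read the time-domain bytes from the same AnalyserNode on each animation frame and compute their RMS level. The sketch below is not from the original post: the meter() helper is a hypothetical name, and it assumes analyserNode has already been wired up inside success() as above.

// hypothetical helper - assumes analyserNode was created in success() above
function meter() {
    requestAnimationFrame(meter);
    var timeData = new Uint8Array(analyserNode.fftSize);
    analyserNode.getByteTimeDomainData(timeData); // unsigned bytes centered on 128
    var sumSquares = 0;
    for (var i = 0; i < timeData.length; i += 1) {
        var sample = (timeData[i] - 128) / 128; // map each byte to [-1, 1)
        sumSquares += sample * sample;
    }
    var rms = Math.sqrt(sumSquares / timeData.length); // 0 is silence, ~1 is full scale
    console.log("volume (RMS): " + rms.toFixed(3));
}

Pitch is harder. A crude starting point is to scan the getByteFrequencyData() array for the bin with the largest magnitude and convert its index to Hz via index * audioContext.sampleRate / analyserNode.fftSize, though real pitch trackers typically use autocorrelation on the time-domain samples instead.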

Here is a version of the code that does two things:

  • It retrieves raw PCM audio buffers from the live microphone and sends them to console.log (open the JavaScript console with Ctrl+Shift+I); this is the raw PCM audio curve of the streaming microphone data in the time domain
  • It also runs the same audio data through an FFT (fast Fourier transform), likewise sent to console.log; this is the frequency-domain representation of the same Web Audio API event-loop buffer

NOTE - wear headphones or turn down your speaker volume, otherwise you will hear the squeal of audio feedback as the microphone picks up the speaker audio, Jimi Hendrix style!

<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
<title>capture microphone then show time & frequency domain output</title>
<script type="text/javascript">
var webaudio_tooling_obj = function () {
    var audioContext = new AudioContext();
    console.log("audio is starting up ...");
    var BUFF_SIZE_RENDERER = 16384;
    var audioInput = null,
    microphone_stream = null,
    gain_node = null,
    script_processor_node = null,
    script_processor_analysis_node = null,
    analyser_node = null;
    // shim for the prefixed getUserMedia implementations
    if (!navigator.getUserMedia)
        navigator.getUserMedia = navigator.webkitGetUserMedia ||
            navigator.mozGetUserMedia || navigator.msGetUserMedia;
    if (navigator.getUserMedia){
        navigator.getUserMedia({audio:true}, 
            function(stream) {
                start_microphone(stream);
            },
            function(e) {
                alert('Error capturing audio.');
            }
            );
    } else { alert('getUserMedia not supported in this browser.'); }
    // ---
    function show_some_data(given_typed_array, num_row_to_display, label) {
        var size_buffer = given_typed_array.length;
        var index = 0;
        console.log("__________ " + label);
        if (label === "time") {
            for (; index < num_row_to_display && index < size_buffer; index += 1) {
                var curr_value_time = (given_typed_array[index] / 128) - 1.0;
                console.log(curr_value_time);
            }
        } else if (label === "frequency") {
            for (; index < num_row_to_display && index < size_buffer; index += 1) {
                console.log(given_typed_array[index]);
            }
        } else {
            throw new Error("ERROR - must pass time or frequency");
        }
    }
    function process_microphone_buffer(event) {
        // raw PCM samples for this render quantum, as a Float32Array in [-1, 1]
        var microphone_output_buffer = event.inputBuffer.getChannelData(0); // just mono - 1 channel for now
    }
    function start_microphone(stream){
        gain_node = audioContext.createGain();
        gain_node.connect( audioContext.destination );
        microphone_stream = audioContext.createMediaStreamSource(stream);
        microphone_stream.connect(gain_node); 
        script_processor_node = audioContext.createScriptProcessor(BUFF_SIZE_RENDERER, 1, 1);
        script_processor_node.onaudioprocess = process_microphone_buffer;
        microphone_stream.connect(script_processor_node);
        // a ScriptProcessorNode must be connected to a destination or
        // onaudioprocess may never fire; its output buffer stays silent
        script_processor_node.connect(audioContext.destination);
        // --- enable volume control for output speakers
        document.getElementById('volume').addEventListener('change', function() {
            var curr_volume = parseFloat(this.value); // slider value arrives as a string
            gain_node.gain.value = curr_volume;
            console.log("curr_volume ", curr_volume);
        });
        // --- setup FFT
        script_processor_analysis_node = audioContext.createScriptProcessor(2048, 1, 1);
        script_processor_analysis_node.connect(gain_node);
        analyser_node = audioContext.createAnalyser();
        analyser_node.smoothingTimeConstant = 0;
        analyser_node.fftSize = 2048;
        microphone_stream.connect(analyser_node);
        analyser_node.connect(script_processor_analysis_node);
        var buffer_length = analyser_node.frequencyBinCount;
        var array_freq_domain = new Uint8Array(buffer_length);
        var array_time_domain = new Uint8Array(buffer_length);
        console.log("buffer_length " + buffer_length);
        script_processor_analysis_node.onaudioprocess = function() {
            // copy the current analysis frame (first channel only)
            analyser_node.getByteFrequencyData(array_freq_domain);
            analyser_node.getByteTimeDomainData(array_time_domain);
            // the playbackState / PLAYING_STATE check from early Web Audio
            // drafts no longer exists, so log unconditionally
            show_some_data(array_freq_domain, 5, "frequency");
            show_some_data(array_time_domain, 5, "time"); // store this to record to aggregate buffer/file
        };
    }
}(); //  webaudio_tooling_obj = function()
</script>
</head>
<body>
    <p>Volume</p>
    <input id="volume" type="range" min="0" max="1" step="0.1" value="0.5"/>
</body>
</html>
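
A footnote for current readers: createScriptProcessor, used above, has since been deprecated in the Web Audio API in favour of AudioWorklet, which runs audio processing off the main thread. Below is a minimal sketch of the same microphone-to-PCM capture using the modern promise-based APIs; the file name mic-processor.js and the registered name 'mic-processor' are placeholders of my choosing, not part of the original answer.

// mic-processor.js - runs in the AudioWorkletGlobalScope (audio thread)
class MicProcessor extends AudioWorkletProcessor {
    process(inputs, outputs, parameters) {
        var input = inputs[0];
        if (input.length > 0) {
            // input[0] is a Float32Array of raw time-domain PCM for channel 0
            this.port.postMessage(input[0].slice(0)); // copy it - the buffer is reused
        }
        return true; // keep the processor alive
    }
}
registerProcessor('mic-processor', MicProcessor);

And on the main thread:

// main thread - modern promise-based getUserMedia plus AudioWorklet
async function startMicrophone() {
    var audioContext = new AudioContext();
    var stream = await navigator.mediaDevices.getUserMedia({ audio: true });
    await audioContext.audioWorklet.addModule('mic-processor.js');
    var source = audioContext.createMediaStreamSource(stream);
    var micNode = new AudioWorkletNode(audioContext, 'mic-processor');
    source.connect(micNode);
    micNode.port.onmessage = function (event) {
        console.log(event.data); // one block of raw PCM samples
    };
}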