import gradio as gr
import numpy as np


def process_audio(audio):
    # This function receives audio data from the client
    # and can perform any server-side processing.
    # For this example, we just return the audio as-is.
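    # A sketch of actual processing (assumes the default type="numpy", where
    # `audio` is a (sample_rate, np.ndarray) tuple; the names are illustrative):
    #     sample_rate, samples = audio
    #     return sample_rate, (samples * 0.5).astype(samples.dtype)  # halve volume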
    return audio


with gr.Blocks() as demo:
    audio_input = gr.Audio(sources=["microphone"], streaming=True, visible=False)
    audio_output = gr.Audio(streaming=True, visible=False)
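
    # Both components are hidden; the custom HTML below provides the visible
    # UI and pushes microphone data into the hidden input via JavaScript.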
    html = gr.HTML("""
    <button id="startButton">Start Recording</button>
    <button id="stopButton" disabled>Stop Recording</button>
    <div id="status">Ready</div>
    <div id="debug"></div>
    <script>
    let audioContext;
    let mediaStream;
    let mediaStreamSource;
    let processor;
    let recording = false;

    async function startRecording() {
        try {
            audioContext = new (window.AudioContext || window.webkitAudioContext)();
            mediaStream = await navigator.mediaDevices.getUserMedia({ audio: true });
            mediaStreamSource = audioContext.createMediaStreamSource(mediaStream);
            // ScriptProcessorNode is deprecated (AudioWorklet is its modern
            // replacement), but it keeps this example simple.
            processor = audioContext.createScriptProcessor(1024, 1, 1);
            mediaStreamSource.connect(processor);
            processor.connect(audioContext.destination);

            processor.onaudioprocess = function(e) {
                if (!recording) return;
                const audioData = e.inputBuffer.getChannelData(0);
                document.getElementById('debug').textContent = 'Processing audio chunk...';
                // Package the chunk and hand it to the hidden Gradio input.
                // NOTE: this wraps raw float32 samples in a Blob; a valid WAV
                // file would also need a RIFF header before the server could
                // decode it as audio/wav.
                const blob = new Blob([audioData], {type: 'audio/wav'});
                const file = new File([blob], 'audio.wav', {type: 'audio/wav'});
                const dt = new DataTransfer();
                dt.items.add(file);
                // NOTE: these selectors target Gradio's internal DOM, which
                // varies between versions; adjust them for your deployment.
                const component = document.querySelector('#component-0');
                component.querySelector('input[type=file]').files = dt.files;
                component.querySelector('button[type=submit]').click();
            };

            recording = true;
            document.getElementById('status').textContent = 'Recording...';
            document.getElementById('startButton').disabled = true;
            document.getElementById('stopButton').disabled = false;
        } catch (err) {
            console.error('Error starting recording:', err);
            document.getElementById('status').textContent = 'Error: ' + err.message;
        }
    }

    function stopRecording() {
        if (processor) {
            processor.disconnect();
            mediaStreamSource.disconnect();
        }
        if (mediaStream) {
            // Release the microphone so the browser's recording indicator clears.
            mediaStream.getTracks().forEach(track => track.stop());
        }
        recording = false;
        document.getElementById('status').textContent = 'Stopped';
        document.getElementById('startButton').disabled = false;
        document.getElementById('stopButton').disabled = true;
    }

    document.getElementById('startButton').addEventListener('click', startRecording);
    document.getElementById('stopButton').addEventListener('click', stopRecording);
    </script>
    """)

    audio_input.stream(process_audio, inputs=audio_input, outputs=audio_output)

if __name__ == "__main__":
    demo.launch()
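
# Usage sketch: run this file (e.g. `python app.py`) and open the local URL
# Gradio prints. The custom Start/Stop buttons drive the hidden Audio input
# through the injected <script> above.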