# Demo / app.py
# Author: chinmaydan — commit 47bfd84: "accepting input from upload or mic"
# (Hugging Face Spaces page chrome — raw / history / blame / 1.53 kB —
# preserved here as a comment so the file parses as Python.)
import os

# Install whisper at startup (HF Spaces hack); must run BEFORE `import whisper`.
os.system("pip install git+https://github.com/openai/whisper.git")

import gradio as gr
import numpy as np
import whisper
# Load the multilingual "small" Whisper checkpoint once at startup; it is
# shared by every call to predict().
model = whisper.load_model("small")
# BUG FIX: removed `model.config.forced_decoder_ids = None`. The object
# returned by openai-whisper's load_model() has no `.config` attribute —
# that idiom belongs to transformers' WhisperForConditionalGeneration —
# so the line raised AttributeError and killed the app at import time.
def predict(audio, mic_audio=None):
    """Transcribe speech with Whisper.

    Parameters
    ----------
    audio : tuple[int, numpy.ndarray] | None
        (sample_rate, samples) from the upload widget; samples may be
        integer PCM or float, mono or (frames, channels).
    mic_audio : tuple[int, numpy.ndarray] | None
        Same shape, from the microphone widget; takes precedence when set.

    Returns
    -------
    str
        The transcription, or a placeholder message when no audio is given.
    """
    # Prefer the microphone recording when both inputs are present.
    if mic_audio is not None:
        sampling_rate, waveform = mic_audio
    elif audio is not None:
        sampling_rate, waveform = audio
    else:
        return "(please provide audio)"

    # BUG FIX: the original called whisper.load_audio(waveform), but
    # load_audio expects a file *path* (it shells out to ffmpeg) and crashes
    # on a numpy array. Convert the raw samples in-process instead — this
    # also makes the mono/16 kHz promise in the UI description true.
    waveform = _prepare_audio(sampling_rate, waveform)

    waveform = whisper.pad_or_trim(waveform)
    mel = whisper.log_mel_spectrogram(waveform).to(model.device)
    # NOTE: dropped the original `_, probs = model.detect_language(mel)` —
    # both results were discarded, so it was a wasted forward pass.
    # fp16=False keeps decoding in float32 so CPU-only Spaces work.
    options = whisper.DecodingOptions(fp16=False)
    result = whisper.decode(model, mel, options)
    print(result.text)
    # BUG FIX: the original returned four values (text + three gr.update()s)
    # while the Interface declares a single Text output, which makes gradio
    # error at runtime; return only the transcription.
    return result.text


def _prepare_audio(sampling_rate, waveform):
    """Convert raw gradio samples to mono float32 at Whisper's 16 kHz."""
    waveform = np.asarray(waveform)
    # Integer PCM -> [-1, 1] float32, the range Whisper expects.
    if np.issubdtype(waveform.dtype, np.integer):
        waveform = waveform.astype(np.float32) / np.iinfo(waveform.dtype).max
    else:
        waveform = waveform.astype(np.float32)
    # Down-mix (frames, channels) stereo to mono.
    if waveform.ndim > 1:
        waveform = waveform.mean(axis=1)
    # Linear resample to 16 kHz when the source rate differs.
    target_rate = 16000
    if sampling_rate != target_rate and waveform.size:
        n_target = int(round(waveform.shape[0] * target_rate / sampling_rate))
        waveform = np.interp(
            np.linspace(0.0, waveform.shape[0] - 1, num=n_target),
            np.arange(waveform.shape[0]),
            waveform,
        ).astype(np.float32)
    return waveform
# UI copy rendered on the Gradio page: window/title bar text and the
# HTML/markdown description shown above the inputs.
title = "Demo for Whisper -> Something -> XLS-R"
description = """
<b>How to use:</b> Upload an audio file or record using the microphone. The audio is converted to mono and resampled to 16 kHz before
being passed into the model. The output is the text transcription of the audio.
"""
# Build and serve the Gradio UI: two optional audio inputs (file upload or
# microphone recording) feed predict(); one text box shows the transcription.
audio_inputs = [
    gr.Audio(label="Upload Speech", source="upload", type="numpy"),
    gr.Audio(label="Record Speech", source="microphone", type="numpy"),
]
text_outputs = [
    gr.Text(label="Transcription"),
]
demo = gr.Interface(
    fn=predict,
    inputs=audio_inputs,
    outputs=text_outputs,
    title=title,
    description=description,
)
demo.launch()