# Demo / app.py — Whisper ASR demo (HuggingFace Spaces)
# Author: chinmaydan — "troubleshooting the detect language part" (commit 88413ab)
# (Scraped page chrome — raw / history / blame / 3.17 kB — preserved as comments
#  so the file remains valid Python.)
# imports
import os
# NOTE(review): installing a dependency at import time via os.system is fragile
# (no error checking, slows every cold start). It is a common HuggingFace Spaces
# workaround for packages not in requirements.txt — confirm before removing.
os.system("pip install git+https://github.com/openai/whisper.git")
import gradio as gr
import whisper
# The ASR model checkpoint. Options are "small", "medium", "large", "largev2";
# "large"/"largev2" do not fit on the free HuggingFace CPU tier.
model = whisper.load_model("small")
# Maps the human-readable language name (as shown in the UI dropdown,
# minus the " Text" suffix) to the ISO 639-1 code Whisper expects.
language_id_lookup = dict(
    English="en",
    German="de",
    Greek="el",
    Spanish="es",
    Finnish="fi",
    Russian="ru",
    Hungarian="hu",
    Dutch="nl",
    French="fr",
    Polish="pl",
    Portuguese="pt",
    Italian="it",
)
# The predict function. audio, language and mic_audio are all parameters directly
# passed by gradio, i.e. user input; they are specified in the inputs=[] block at
# the bottom, and the outputs=[] block specifies the output types.
def predict(audio, language, mic_audio=None):
    """Transcribe speech with Whisper.

    Parameters:
        audio:     filepath of an uploaded audio clip (or None).
        language:  dropdown choice, e.g. "English Text" or "Detect Language".
        mic_audio: filepath of a microphone recording (or None); takes
                   precedence over `audio` when both are given.

    Returns:
        (transcription_text, language_code) on success, or a single
        placeholder string when no audio was provided.
    """
    # Prefer the microphone recording; fall back to the uploaded file.
    if mic_audio is not None:
        input_audio = mic_audio
    elif audio is not None:
        input_audio = audio
    else:
        return "(please provide audio)"

    audio = whisper.load_audio(input_audio)
    audio = whisper.pad_or_trim(audio)
    mel = whisper.log_mel_spectrogram(audio).to(model.device)

    if language == "Detect Language":
        # BUGFIX: the public API is model.detect_language (not _detect_language).
        # It returns (language_tokens, language_probs); the probs dict maps
        # language codes to probabilities, so the detected language is its argmax.
        _, probs = model.detect_language(mel)
        outLanguage = max(probs, key=probs.get)
        print("Detected language is: " + outLanguage)
    else:
        # Dropdown entries look like "English Text" — the first word is the key.
        outLanguage = language_id_lookup[language.split()[0]]

    # fp16=False because the Space runs on CPU, where half precision is unsupported.
    options = whisper.DecodingOptions(fp16=False, language=outLanguage)
    result = whisper.decode(model, mel, options)
    print(result.text + " " + outLanguage)
    return result.text, outLanguage
# Copy shown in the Gradio page header. The description supports inline HTML.
description = """
<b>How to use:</b> Upload an audio file or record using the microphone. The audio is converted to mono and resampled to 16 kHz before
being passed into the model. The output is the text transcription of the audio.
"""

title = "Demo for Whisper -> Something -> XLS-R"
# Build and launch the Gradio UI. `predict` returns (transcription, language),
# so two output components are declared to match.
gr.Interface(
    fn=predict,
    inputs=[
        gr.Audio(label="Upload Speech", source="upload", type="filepath"),
        # gr.Dropdown is the current spelling — the gr.inputs.* namespace is
        # deprecated (and removed in later Gradio versions); `value=` replaces
        # the old `default=` keyword. Consistent with the gr.Audio usage above.
        gr.Dropdown(['English Text',
                     'German Text',
                     'Greek Text',
                     'Spanish Text',
                     'Finnish Text',
                     'Russian Text',
                     'Hungarian Text',
                     'Dutch Text',
                     'French Text',
                     'Polish Text',
                     'Portuguese Text',
                     'Italian Text',
                     'Detect Language'],
                    value='English Text',
                    label="Select the language that you are speaking in."),
        gr.Audio(label="Record Speech", source="microphone", type="filepath"),
    ],
    outputs=[
        # BUGFIX: predict returns two values; the original declared only one
        # output component, causing a value-count mismatch in Gradio.
        gr.Text(label="Transcription"),
        gr.Text(label="Language"),
    ],
    title=title,
    description=description,
).launch()