import whisper
import gradio as gr
from transformers import pipeline

# Load the Whisper "base" checkpoint and an English-to-French translation pipeline.
model = whisper.load_model("base")
en_fr_translator = pipeline("translation_en_to_fr")


def transcribe(audio):
    # With live=True, Gradio can call this before any audio has been recorded.
    if audio is None:
        return ""

    # Load the audio and pad/trim it to fit 30 seconds.
    audio = whisper.load_audio(audio)
    audio = whisper.pad_or_trim(audio)

    # Make a log-Mel spectrogram and move it to the same device as the model.
    mel = whisper.log_mel_spectrogram(audio).to(model.device)

    # Detect the spoken language.
    _, probs = model.detect_language(mel)
    lang = f"Detected language: {max(probs, key=probs.get)}"

    # Decode the audio (fp16=False so decoding also works on CPU).
    options = whisper.DecodingOptions(fp16=False)
    result = whisper.decode(model, mel, options)
    text = result.text

    # Translate the English transcription to French.
    translation = en_fr_translator(text)[0]["translation_text"]

    return f"{lang}\n{text}\n\nFrench translation: {translation}"


gr.Interface(
    title="OpenAI Whisper ASR Gradio Web UI",
    fn=transcribe,
    # On Gradio 4+, this becomes gr.Audio(sources=["microphone"], type="filepath").
    inputs=gr.Audio(source="microphone", type="filepath"),
    outputs="textbox",
    live=True,
).launch()
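
# A minimal sketch of running this script locally (assumed setup, not part of the
# original): whisper.load_audio shells out to ffmpeg, which must be on PATH, and
# the default translation_en_to_fr pipeline loads a T5 checkpoint that needs
# sentencepiece. "app.py" and "sample.wav" below are hypothetical names.
#
#     pip install openai-whisper transformers gradio torch sentencepiece
#     python app.py
#
# To sanity-check transcribe() without launching the UI, call it directly on a
# local English recording before the .launch() line:
#
#     print(transcribe("sample.wav"))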