import os

# Install Whisper straight from GitHub at startup (a common pattern in Spaces
# for packages not pinned in requirements.txt).
os.system("pip install git+https://github.com/openai/whisper.git")

import gradio as gr
import whisper

model = whisper.load_model("small")
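# "small" is the ~244M-parameter multilingual checkpoint; the other sizes
# ("tiny", "base", "medium", "large") trade speed for accuracy.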


def inference(audio_upload, audio_mic):
    # Prefer the microphone recording when both inputs are filled in.
    audio_path = audio_mic if audio_mic is not None else audio_upload
    if audio_path is None:
        return "Please upload or record some audio first."

    # load_audio decodes via ffmpeg to 16 kHz mono; pad_or_trim fits the
    # waveform to Whisper's 30-second context window.
    audio = whisper.load_audio(audio_path)
    audio = whisper.pad_or_trim(audio)

    mel = whisper.log_mel_spectrogram(audio).to(model.device)

    # Detect the spoken language from the mel spectrogram.
    _, probs = model.detect_language(mel)
    print(f"Detected language: {max(probs, key=probs.get)}")

    # fp16=False forces full-precision decoding, which is required on CPU.
    options = whisper.DecodingOptions(fp16=False)
    result = whisper.decode(model, mel, options)

    # Return only the transcription; the interface has a single output.
    return result.text
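
# Note: pad_or_trim keeps only the first 30 seconds of audio. For longer clips,
# Whisper's built-in model.transcribe() handles chunked long-form decoding
# internally, so a simpler variant (equivalent for short clips) would be:
#
#     result = model.transcribe(audio_path, fp16=False)
#     return result["text"]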


title = "Demo for Whisper -> Something -> XLS-R"

description = """
<b>How to use:</b> Upload an audio file or record using the microphone. The audio is converted to mono and resampled to 16 kHz before
being passed into the model. The output is the text transcription of the audio.
"""

gr.Interface(
    fn=inference,
    inputs=[
        # type="filepath" hands inference() a path on disk, which is what
        # whisper.load_audio expects (type="numpy" would pass a raw array).
        gr.Audio(label="Upload Speech", source="upload", type="filepath"),
        gr.Audio(label="Record Speech", source="microphone", type="filepath"),
    ],
    outputs=[
        gr.Text(label="Transcription"),
    ],
    title=title,
    description=description,
).launch()
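
# Run locally with `python app.py` and open the printed URL. Passing
# share=True to launch() would additionally create a temporary public link.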