# imports
import os
os.system("pip install git+https://github.com/openai/whisper.git")
import gradio as gr
import whisper

# The model we are using for ASR; options are small, medium, large, and large-v2
# (large and large-v2 don't fit on the Hugging Face CPU tier).
model = whisper.load_model("small")
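# (Sketch, not part of the original app: whisper.load_model accepts any of the
# checkpoint names above; larger checkpoints are more accurate but use more
# memory and run slower on CPU. For example:)
# model = whisper.load_model("medium")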


# A table to look up the language code for each supported language
language_id_lookup = {
    "Arabic":  "ar",
    "English": "en",
    "Chinese": "zh",
    "German":  "de",
    "Spanish": "es",
    "Russian": "ru",
    "French":  "fr",
}



# The predict function. audio, language, and mic_audio are all passed in directly by Gradio,
# i.e. they come from the user. They are specified in the inputs=[] list of the gr.Interface
# call at the bottom; the outputs=[] list specifies the output types.
def predict(audio, language, mic_audio=None):
    
    # Use the microphone recording if one was provided, otherwise fall back to the uploaded audio
    if mic_audio is not None:
        input_audio = mic_audio
    elif audio is not None:
        input_audio = audio
    else:
        return "(please provide audio)"

    # Use Whisper's preprocessing helpers to load the audio and pad/trim it to the 30-second window the model expects
    audio = whisper.load_audio(input_audio)
    audio = whisper.pad_or_trim(audio)
    
    # Calculate the log-mel spectrogram
    mel = whisper.log_mel_spectrogram(audio).to(model.device)
    
    # If the model is supposed to detect the language, set outLanguage to None;
    # otherwise set it to the code for the specified language.
    if language == "Detect Language":
        outLanguage = None
    else:
        outLanguage = language_id_lookup[language.split()[0]]

    options = whisper.DecodingOptions(fp16=False, language=outLanguage)
    result = whisper.decode(model, mel, options)
    outLanguage = result.language
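    # (Sketch, not part of the original app: Whisper can also report language
    # probabilities explicitly before decoding, e.g.
    #   _, probs = model.detect_language(mel)
    #   detected = max(probs, key=probs.get)
    # here we simply read result.language from whisper.decode instead.)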
    print(result.text + " " + outLanguage)
    return result.text, outLanguage



title = "Demo for Whisper -> Something -> XLS-R"

description = """
<b>How to use:</b> Upload an audio file or record using the microphone. The audio is into the whisper model developed by openai. 
The output is the text transcription of the audio in the language you inputted. If you asked the model to detect a language, it will
tell you what language it detected.
"""

gr.Interface(
    fn=predict,
    inputs=[
        gr.Audio(label="Upload Speech", source="upload", type="filepath"),
        gr.Dropdown(['Arabic Text',
                     'Chinese Text',
                     'English Text',
                     'German Text',
                     'Spanish Text',
                     'Russian Text',
                     'French Text',
                     'Detect Language'], type="value", value='English Text', label="Select the language that you are speaking in."),
        gr.Audio(label="Record Speech", source="microphone", type="filepath"),
    ],
    outputs=[
        gr.Text(label="Transcription"),
        gr.Text(label="Detected Language"),
    ],
    title=title,
    description=description,
).launch()
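
# (Sketch, not part of the original app: predict() can also be called directly,
# without the UI; "sample.wav" below is a hypothetical local audio file.)
#
#   text, lang = predict("sample.wav", "English Text")
#   print(text, lang)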