File size: 1,170 Bytes
476b1c3
62443d2
 
7640294
62443d2
7640294
476b1c3
 
 
 
 
62443d2
70c05b8
 
 
 
476b1c3
 
62443d2
70c05b8
 
7640294
 
 
 
70c05b8
476b1c3
 
 
70c05b8
 
62443d2
476b1c3
70c05b8
 
 
80a0e95
476b1c3
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
"""Gradio front-end for an OpenAI Whisper transcription demo.

Builds a ``gr.Interface`` around ``infer.predict`` with a model-size
dropdown, a language selector, and microphone/upload audio inputs,
then serves it with ``demo.launch()``.
"""

from src import infer, utils
import gradio as gr

# Fetch/cache the Whisper model weights up front so the first request
# does not stall on a download.
infer.model_preloader_downloader()

# Example rows for gr.Interface: one value per input, in the same order
# as `inputs` below -> [model, language, microphone audio, uploaded audio].
# NOTE(review): the column layout is inferred from the declared inputs —
# confirm against infer.predict before re-enabling `examples=` below.
AUDIO_EXAMPLE = [
    ["base", "indonesian", None, "assets/audio/male-indonesian.wav"],
    ["base", "indonesian", None, "assets/audio/female-indonesian.wav"],
    ["base", "english", None, "assets/audio/male-english.wav"],
    ["base", "english", None, "assets/audio/female-english.wav"],
]

TITLE = "OpenAI Whisper"
DESCRIPTION = utils.parsing_text("assets/descriptions.md")
ARTICLE = utils.parsing_text("assets/articles.md")

demo = gr.Interface(
    fn=infer.predict,
    inputs=[
        gr.Dropdown(
            label="Model",
            choices=[
                "tiny", "base", "small", "medium",
                "large", "large-v1", "large-v2",
            ],
            value="base",
        ),
        gr.Radio(
            label="Language",
            choices=["indonesian", "english"],
            value="indonesian",
        ),
        gr.Audio(label="Speak", source="microphone", type="filepath"),
        gr.Audio(label="Upload Audio", source="upload", type="filepath"),
    ],
    outputs=[gr.TextArea(label="Output Text")],
    title=TITLE,
    description=DESCRIPTION,
    article=ARTICLE,
    # Disabled pending confirmation of the example-row layout above.
    # (The previous comment referenced a nonexistent name `audio_examples`.)
    # examples=AUDIO_EXAMPLE,
)

if __name__ == "__main__":
    demo.launch()