yellowcandle committed on
Commit
a4e4751
1 Parent(s): 043229b

Add model selection dropdown to Gradio interface

Browse files
Files changed (1) hide show
  1. app.py +5 -2
app.py CHANGED
@@ -7,10 +7,13 @@ from datasets import load_dataset
7
 
8
  @spaces.GPU(duration=120)
9
  def transcribe_audio(audio):
 
 
 
10
  device = "cuda:0" if torch.cuda.is_available() else "cpu"
11
  torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
12
 
13
- model_id = "openai/whisper-large-v3"
14
 
15
  model = AutoModelForSpeechSeq2Seq.from_pretrained(
16
  model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True
@@ -36,7 +39,7 @@ def transcribe_audio(audio):
36
 
37
 
38
  demo = gr.Interface(fn=transcribe_audio,
39
- inputs=gr.Audio(sources="upload", type="filepath"),
40
  outputs="text")
41
  demo.launch()
42
 
 
7
 
8
  @spaces.GPU(duration=120)
9
  def transcribe_audio(audio):
10
+ if audio is None:
11
+ return "Please upload an audio file."
12
+
13
  device = "cuda:0" if torch.cuda.is_available() else "cpu"
14
  torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
15
 
16
+ model_id = ["openai/whisper-large-v3", "alvanlii/whisper-small-cantonese"]
17
 
18
  model = AutoModelForSpeechSeq2Seq.from_pretrained(
19
  model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True
 
39
 
40
 
41
  demo = gr.Interface(fn=transcribe_audio,
42
+ inputs=[gr.Audio(sources="upload", type="filepath"), gr.Dropdown(choices=["openai/whisper-large-v3", "alvanlii/whisper-small-cantonese"])],
43
  outputs="text")
44
  demo.launch()
45