anzorq committed on
Commit
840eaf7
·
1 Parent(s): 321b38e

ui changes + translation

Files changed (1)
  1. app.py +0 -5
app.py CHANGED
@@ -8,8 +8,6 @@ model = whisper.load_model("base")
 def transcribe(audio, state={}, delay=0.2, lang=None, translate=False):
     time.sleep(delay)
 
-    # state = {"transcription": "", "translation": ""}
-
     transcription = model.transcribe(
         audio,
         language = lang if lang != "auto" else None
@@ -71,7 +69,6 @@ lang_dropdown = gr.inputs.Dropdown(choices=["auto", "english", "afrikaans",
                                     "yoruba"],
                                     label="Language", default="auto", type="value")
 
-# chechbox whether to translate
 translate_checkbox = gr.inputs.Checkbox(label="Translate to English", default=False)
 
 
@@ -84,9 +81,7 @@ state = gr.State({"transcription": "", "translation": ""})
 
 gr.Interface(
     fn=transcribe,
-    # fn=debug,
     inputs=[
-        # gr.Audio(source="upload", type="filepath"),
        gr.Audio(source="microphone", type="filepath", streaming=True),
        state,
        delay_slider,
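The hunks above only remove commented-out code and do not show how the new translate_checkbox value is consumed inside transcribe. A minimal sketch of one plausible wiring, assuming Whisper's task="translate" decoding option; the names mirror the diff, but the task handling and state bookkeeping below are assumptions, not the commit's actual code:

# Sketch only: how the translate flag could be passed through to Whisper.
import time
import whisper

model = whisper.load_model("base")

def transcribe(audio, state={}, delay=0.2, lang=None, translate=False):
    time.sleep(delay)
    result = model.transcribe(
        audio,
        language=lang if lang != "auto" else None,
        # Whisper translates to English when task="translate" is requested.
        task="translate" if translate else "transcribe",
    )
    # Append the newest chunk so the streaming interface shows a running transcript.
    state["transcription"] = state.get("transcription", "") + result["text"]
    return state["transcription"], state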
 