avfranco committed on
Commit
0d16ed8
1 Parent(s): 5ad5566

Update app.py

Browse files

Ask buttons / text set to visible

Files changed (1) hide show
  1. app.py +19 -18
app.py CHANGED
@@ -24,8 +24,9 @@ def audio_transcribe(inputs, task):
24
  raise gr.Error("No audio file submitted! Please upload or record an audio file before submitting your request.")
25
 
26
  text = pipe(inputs, batch_size=BATCH_SIZE, generate_kwargs={"task": task}, return_timestamps=True)["text"]
27
- ask_question.visible = True
28
-
 
29
  return text
30
 
31
  @spaces.GPU
@@ -46,28 +47,28 @@ def respond_to_question(transcript, question):
46
  ).choices[0].text
47
  return response
48
 
49
- def ask_question_callback():
50
- if ask_question.value:
51
- response = respond_to_question(transcript_output.value, ask_question.value)
52
- response_output.visible = True
53
- response_output.value = response
54
- else:
55
- response_output.value = "No question asked"
56
-
57
-
58
  with gr.Blocks() as transcriberUI:
59
  gr.Markdown(
60
- """
61
- # Ola!
62
- Clicar no botao abaixo para selecionar o Audio a ser transcrito!
63
- Ambiente Demo disponivel 24x7. Running on ZeroGPU with openai/whisper-large-v3
64
- """)
 
65
  inp = gr.File(label="Arquivo de Audio", show_label=True, type="filepath", file_count="single", file_types=["mp3"])
66
  transcribe = gr.Textbox(label="Transcricao", show_label=True, show_copy_button=True)
67
  ask_question = gr.Textbox(label="Ask a question", visible=False)
68
  response_output = gr.Textbox(label="Response", visible=False)
69
- submit_question = gr.Button("Submit question")
70
-
 
 
 
 
 
 
 
 
71
  inp.upload(audio_transcribe, inp, transcribe)
72
  submit_question.click(ask_question_callback, outputs=[response_output], inputs=[transcribe, ask_question])
73
 
 
24
  raise gr.Error("No audio file submitted! Please upload or record an audio file before submitting your request.")
25
 
26
  text = pipe(inputs, batch_size=BATCH_SIZE, generate_kwargs={"task": task}, return_timestamps=True)["text"]
27
+ transcriberUI.ask_question.visible = True
28
+ transcriberUI.submit_question.visible = True
29
+
30
  return text
31
 
32
  @spaces.GPU
 
47
  ).choices[0].text
48
  return response
49
 
 
 
 
 
 
 
 
 
 
50
  with gr.Blocks() as transcriberUI:
51
  gr.Markdown(
52
+ """
53
+ # Ola!
54
+ Clicar no botao abaixo para selecionar o Audio a ser transcrito!
55
+ Ambiente Demo disponivel 24x7. Running on ZeroGPU with openai/whisper-large-v3
56
+ """
57
+ )
58
  inp = gr.File(label="Arquivo de Audio", show_label=True, type="filepath", file_count="single", file_types=["mp3"])
59
  transcribe = gr.Textbox(label="Transcricao", show_label=True, show_copy_button=True)
60
  ask_question = gr.Textbox(label="Ask a question", visible=False)
61
  response_output = gr.Textbox(label="Response", visible=False)
62
+ submit_question = gr.Button("Submit question", visible=False)
63
+
64
+ def ask_question_callback():
65
+ if ask_question.value:
66
+ response = respond_to_question(transcript_output.value, ask_question.value)
67
+ response_output.visible = True
68
+ response_output.value = response
69
+ else:
70
+ response_output.value = "No question asked"
71
+
72
  inp.upload(audio_transcribe, inp, transcribe)
73
  submit_question.click(ask_question_callback, outputs=[response_output], inputs=[transcribe, ask_question])
74