Update app.py
app.py
CHANGED
@@ -90,17 +90,32 @@ def transcribe(asr_inputs, task):
         raise gr.Error("No audio file submitted! Please upload or record an audio file before submitting your request.")
 
     text = asr_pl(asr_inputs, batch_size=ASR_BATCH_SIZE, generate_kwargs={"task": task}, return_timestamps=True)["text"]
-    return
+    return [text, {"text": text, "files":[]}]
 
 
 """Gradio User Interface"""
 #audio_input = gr.Audio(sources="upload", type="filepath", label="Audio: from file") #gr.Audio(sources="microphone", type="filepath", label="Audio: from microphone")
 #audio_input_choice = gr.Radio(["audio file", "microphone"], label="Audio Input Source", value="audio file") #
 
+# (transcribe) Interface components
 audio_input = gr.Audio(sources=["upload", "microphone"], type="filepath", label="Audio Input Source")
 task_input_choice = gr.Radio(["transcribe", "translate"], label="Task", value="transcribe")
 task_output = gr.Textbox(label="Transcribed Output")
 
+# ChatInterface components
+chatbot_main = gr.Chatbot(label="Extraction Output")
+chatbot_main_input = gr.MultimodalTextbox({"text": "Choose the referred material(s) and ask your question.", "files":[]})
+chatbot_sys_output = gr.Textbox(value="You are a friendly Chatbot.", label="System Message")
+chatbot_max_tokens = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max. New Tokens")
+chatbot_temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
+chatbot_top_p = gr.Slider(
+    minimum=0.1,
+    maximum=1.0,
+    value=0.95,
+    step=0.05,
+    label="Top-p (nucleus sampling)",
+)
+
 transcribe_interface = gr.Interface(
     fn=transcribe,
     inputs=[
@@ -110,6 +125,7 @@ transcribe_interface = gr.Interface(
     ],
     outputs=[
         task_output, #"text",
+        chatbot_main_input,
     ],
     title=application_title,
     description=application_description,
@@ -120,18 +136,6 @@ transcribe_interface = gr.Interface(
 """
 For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
 """
-chatbot_main = gr.Chatbot(label="Extraction Output")
-chatbot_main_input = gr.MultimodalTextbox({"text": "Choose the referred material(s) and ask your question.", "files":[]})
-chatbot_sys_output = gr.Textbox(value="You are a friendly Chatbot.", label="System Message")
-chatbot_max_tokens = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max. New Tokens")
-chatbot_temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
-chatbot_top_p = gr.Slider(
-    minimum=0.1,
-    maximum=1.0,
-    value=0.95,
-    step=0.05,
-    label="Top-p (nucleus sampling)",
-)
 
 chat_interface = gr.ChatInterface(
     respond,
@@ -157,7 +161,7 @@ with gr.Blocks() as demo:
     """
     def update_task_input(task_input_choice):
         if task_input_choice == "transcribe":
-            return gr.Textbox(label="Transcribed Output") #Audio(sources="upload", label="Audio: from file")
+            return gr.Textbox(label="Transcribed Output") #Audio(sources="upload", label="Audio: from file")
         elif task_input_choice == "translate":
            return gr.Textbox(label="Translated Output") #Audio(sources="microphone", label="Audio: from microphone")
 
@@ -167,10 +171,6 @@ with gr.Blocks() as demo:
         outputs=task_output
     )
 
-    def on_selected_tab(selected_tab):
-        print(f"Selected tab: {selected_tab['value']}, Selected state: {selected_tab['selected']}")
-    tabs.select(on_selected_tab)
-
 
 if __name__ == "__main__":
     demo.queue().launch() #demo.launch()
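Taken together, the hunks above make one change end to end: transcribe now returns one value per output component, and transcribe_interface gains chatbot_main_input as a second output, so a finished transcription also pre-fills the chat input. A minimal sketch of the resulting code follows; the `if asr_inputs is None` guard and the exact `inputs=[...]` list are not visible in this diff and are assumptions, while asr_pl, ASR_BATCH_SIZE, application_title and application_description are taken to be defined elsewhere in app.py, as the unchanged lines imply.

import gradio as gr

def transcribe(asr_inputs, task):
    # Guard assumed from context; only the raise line is visible in the hunk.
    if asr_inputs is None:
        raise gr.Error("No audio file submitted! Please upload or record an audio file before submitting your request.")
    text = asr_pl(asr_inputs, batch_size=ASR_BATCH_SIZE, generate_kwargs={"task": task}, return_timestamps=True)["text"]
    # One value per output component: the plain transcript for the Textbox, and a
    # {"text", "files"} dict, the value format a gr.MultimodalTextbox expects.
    return [text, {"text": text, "files": []}]

transcribe_interface = gr.Interface(
    fn=transcribe,
    inputs=[audio_input, task_input_choice],    # assumed; the inputs list is elided in this diff
    outputs=[task_output, chatbot_main_input],  # transcript now also seeds the chat input
    title=application_title,
    description=application_description,
)

Moving the chatbot_* component definitions above transcribe_interface is what makes this possible: chatbot_main_input has to exist before the outputs list that references it is evaluated.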
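The hunks against the gr.Blocks() section keep the update_task_input helper that swaps the output label between "Transcribed Output" and "Translated Output". Only `outputs=task_output` and a closing parenthesis of its event listener are visible, so the sketch below guesses the listener as task_input_choice.change(...) and reuses the components defined above; returning a new gr.Textbox from the handler is a standard Gradio way to update a component's label.

with gr.Blocks() as demo:
    # ... transcribe_interface / chat_interface tabs assembled here (not shown in the diff) ...

    def update_task_input(task_input_choice):
        # Swap the output label to match the selected task.
        if task_input_choice == "transcribe":
            return gr.Textbox(label="Transcribed Output")
        elif task_input_choice == "translate":
            return gr.Textbox(label="Translated Output")

    # Hypothetical wiring: only outputs=task_output and ")" appear in the hunk.
    task_input_choice.change(
        fn=update_task_input,
        inputs=task_input_choice,
        outputs=task_output,
    )

The on_selected_tab debug hook and its tabs.select(...) registration are simply removed by this commit, so no replacement wiring for tab selection is needed.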