Update app.py
app.py CHANGED
@@ -2,6 +2,7 @@ import torch
 import spaces
 
 import gradio as gr
+import pandas as pd
 from threading import Thread
 import re
 import time
@@ -69,6 +70,7 @@ asr_pl = pipeline(
 application_title = "Enlight Innovations Limited -- Demo"
 application_description = "This demo is designed to illustrate our basic ideas and feasibility in implementation."
 
+# Chatbot Interface functions
 @spaces.GPU
 def respond(
     message,
@@ -107,7 +109,7 @@ def respond(
         response += token
         yield response
 
-
+# Transcribe Interface functions
 @spaces.GPU
 def transcribe(asr_inputs, task):
     #print("Type: " + str(type(asr_inputs)))
@@ -117,30 +119,69 @@ def transcribe(asr_inputs, task):
     text = asr_pl(asr_inputs, batch_size=ASR_BATCH_SIZE, generate_kwargs={"task": task}, return_timestamps=True)["text"]
     return text.strip()
 
+# Profile Interface functions
+def load_profiles():
+    try:
+        return pd.read_csv("temp_profiles.csv")
+    except FileNotFoundError:
+        return pd.DataFrame()
+
+def save_profile(profile_data):
+    df = load_profiles()
+    df = df.append(profile_data, ignore_index=True)
+    df.to_csv("temp_profiles.csv", index=False)
+
+def lookup_profile():
+    df = load_profiles()
+    assessment_id = assessment_id_input.value.strip()
+    if not assessment_id:
+        status.update("Please enter an Assessment ID", color="red")
+        return
+
+    results = df[df["Assessment_ID"].str.contains(assessment_id, case=False)]
+    if results.empty:
+        status.update("No profiles found for this ID", color="red")
+        return
+
+    profile_preview.update(
+        value=results.to_markdown(index=False)
+    )
+    status.update("Profile(s) found!", color="green")
 
 """Gradio User Interface"""
 #audio_input = gr.Audio(sources="upload", type="filepath", label="Audio: from file") #gr.Audio(sources="microphone", type="filepath", label="Audio: from microphone")
 #audio_input_choice = gr.Radio(["audio file", "microphone"], label="Audio Input Source", value="audio file") #
 
-#
+# Profile Interface components
+with gr.Blocks() as profile_interface:
+    # Profile Lookup Section
+    with gr.Column():
+        assessment_id_input = gr.Textbox(
+            label="Assessment Object/Session ID",
+            placeholder="Enter ID here...",
+            required=True
+        )
+        lookup_btn = gr.Button("Lookup Profile", variant="primary")
+        clear_btn = gr.Button("Clear Results", variant="secondary")
+        status = gr.Status(elem_classes="status-container")
+        profile_preview = gr.Markdown(label="Profile Results", value="")
+
+    # Event Bindings
+    lookup_btn.click(lookup_profile, None, None)
+    clear_btn.click(lambda: profile_preview.update(""), status.update("", color=""))
+    assessment_id_input.change(lambda: status.update("", color=""), None, None)
+
+    # Load existing profiles on startup
+    load_profiles()
+
+# Profile Interface
+
+# Transcribe Interface components
 audio_input = gr.Audio(sources=["upload", "microphone"], type="filepath", label="Audio Input Source")
 task_input_choice = gr.Radio(["transcribe", "translate"], label="Task", value="transcribe")
 task_output = gr.Textbox(label="Transcribed Output")
 
-#
-chatbot_main = gr.Chatbot(label="Extraction Output")
-chatbot_main_input = gr.MultimodalTextbox({"text": "Choose the referred material(s) and ask your question.", "files":[]})
-chatbot_sys_output = gr.Textbox(value="You are a friendly Chatbot.", label="System Message")
-chatbot_max_tokens = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max. New Tokens")
-chatbot_temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.1, step=0.1, label="Temperature")
-chatbot_top_p = gr.Slider(
-    minimum=0.1,
-    maximum=1.0,
-    value=0.95,
-    step=0.05,
-    label="Top-p (nucleus sampling)",
-)
-
+# Transcribe Interface
 transcribe_interface = gr.Interface(
     fn=transcribe,
     inputs=[
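A note on the profile persistence helpers added in the hunk above: `DataFrame.append` was deprecated in pandas 1.4 and removed in pandas 2.0, so `save_profile` as committed will raise on a current pandas install. Below is a minimal, pandas-2-compatible sketch of the same helpers, keeping the diff's `temp_profiles.csv` file and assuming `profile_data` is a plain dict (the constant name `PROFILE_CSV` is introduced here for illustration only):

import pandas as pd

PROFILE_CSV = "temp_profiles.csv"  # same temporary CSV the diff writes to

def load_profiles() -> pd.DataFrame:
    # Return an empty frame when nothing has been saved yet.
    try:
        return pd.read_csv(PROFILE_CSV)
    except FileNotFoundError:
        return pd.DataFrame()

def save_profile(profile_data: dict) -> None:
    # pd.concat replaces the removed DataFrame.append (pandas >= 2.0).
    df = pd.concat([load_profiles(), pd.DataFrame([profile_data])], ignore_index=True)
    df.to_csv(PROFILE_CSV, index=False)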
@@ -156,6 +197,19 @@ transcribe_interface = gr.Interface(
     allow_flagging="never",
 )
 
+# ChatInterface components
+chatbot_main = gr.Chatbot(label="Extraction Output")
+chatbot_main_input = gr.MultimodalTextbox({"text": "Choose the referred material(s) and ask your question.", "files":[]})
+chatbot_sys_output = gr.Textbox(value="You are a friendly Chatbot.", label="System Message")
+chatbot_max_tokens = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max. New Tokens")
+chatbot_temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.1, step=0.1, label="Temperature")
+chatbot_top_p = gr.Slider(
+    minimum=0.1,
+    maximum=1.0,
+    value=0.95,
+    step=0.05,
+    label="Top-p (nucleus sampling)",
+)
 
 """
 For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
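The components defined in the hunk above feed the `gr.ChatInterface` call whose header appears in the next hunk; the call itself sits outside the diff context, so the wiring below is only one plausible arrangement, not the app's confirmed one (in particular, `multimodal=True` is an assumption about the Gradio version in use):

chat_interface = gr.ChatInterface(
    respond,
    chatbot=chatbot_main,
    textbox=chatbot_main_input,
    multimodal=True,  # typically needed when the textbox is a gr.MultimodalTextbox
    additional_inputs=[
        chatbot_sys_output,
        chatbot_max_tokens,
        chatbot_temperature,
        chatbot_top_p,
    ],
    title=application_title,
    description=application_description,
)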
@@ -177,7 +231,7 @@ chat_interface = gr.ChatInterface(
 )
 
 with gr.Blocks() as demo:
-    gr.TabbedInterface([transcribe_interface, chat_interface], ["Step 1: Transcribe", "Step 2: Extract"])
+    gr.TabbedInterface([profile_interface, transcribe_interface, chat_interface], ["Step 0: Profile", "Step 1: Transcribe", "Step 2: Extract"])
 
 """
 def clear_audio_input():
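One further note on the profile tab added in the @@ -117,30 +119,69 @@ hunk: in Gradio Blocks, event handlers receive the current component values through `inputs=` and update the page by returning new values for the components listed in `outputs=`; reading `assessment_id_input.value` or calling `.update()` on a component inside a handler does not track or change what is on the page, and `gr.Status` and a `required=` argument on `gr.Textbox` do not appear to be part of the stock Gradio API. A minimal sketch of that tab using the standard Blocks pattern, where a plain `gr.Markdown` stands in for the status line, `load_profiles` is the helper from the diff, and the `Assessment_ID` column name is kept as-is:

import gradio as gr

with gr.Blocks() as profile_interface:
    assessment_id_input = gr.Textbox(label="Assessment Object/Session ID", placeholder="Enter ID here...")
    lookup_btn = gr.Button("Lookup Profile", variant="primary")
    clear_btn = gr.Button("Clear Results", variant="secondary")
    status = gr.Markdown("")           # stand-in for the gr.Status used in the diff
    profile_preview = gr.Markdown("")  # renders the lookup result table

    def lookup_profile(assessment_id):
        # The current textbox value arrives as an argument; new values are returned.
        assessment_id = (assessment_id or "").strip()
        if not assessment_id:
            return "", "Please enter an Assessment ID"
        df = load_profiles()  # CSV-backed helper defined earlier in the app
        if df.empty or "Assessment_ID" not in df.columns:
            return "", "No profiles found for this ID"
        results = df[df["Assessment_ID"].astype(str).str.contains(assessment_id, case=False, regex=False, na=False)]
        if results.empty:
            return "", "No profiles found for this ID"
        # to_markdown needs the optional tabulate dependency, as the diff's version does
        return results.to_markdown(index=False), "Profile(s) found!"

    # Returned values map positionally onto the listed outputs.
    lookup_btn.click(lookup_profile, inputs=assessment_id_input, outputs=[profile_preview, status])
    clear_btn.click(lambda: ("", ""), inputs=None, outputs=[profile_preview, status])
    assessment_id_input.change(lambda: "", inputs=None, outputs=status)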