Update app.py
app.py CHANGED
@@ -3,9 +3,9 @@ import os
 import logging
 from langchain_core.prompts import ChatPromptTemplate
 from langchain_core.output_parsers import StrOutputParser
-from langchain_nvidia_ai_endpoints import ChatNVIDIA
 from langchain_community.graphs import Neo4jGraph
-from
+from langchain_groq import ChatGroq
+from langchain.chains import GraphCypherQAChain
 from pydantic import BaseModel, Field
 from langchain_core.messages import AIMessage, HumanMessage
 from langchain_core.runnables import (
@@ -15,26 +15,24 @@ from langchain_core.runnables import (
     RunnableParallel,
 )
 from langchain_core.prompts.prompt import PromptTemplate
-import requests
 import tempfile
-from langchain.memory import ConversationBufferWindowMemory
 import time
-import
-from langchain.chains import ConversationChain
+import threading
 import torch
-import torchaudio
-from transformers import pipeline, AutoModelForSpeechSeq2Seq, AutoProcessor
 import numpy as np
-import
-from
+import requests
+from transformers import pipeline, AutoModelForSpeechSeq2Seq, AutoProcessor
+
 
+# Setup logging to a file to capture debug information
+logging.basicConfig(filename='neo4j_retrieval.log', level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
 
-#
+# Setup for conversational memory
 conversational_memory = ConversationBufferWindowMemory(
-
-
-
-
+    memory_key='chat_history',
+    k=10,
+    return_messages=True
+)
 
 # Setup Neo4j
 graph = Neo4jGraph(
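Note on the hunk above: the old side deletes the line "from langchain.memory import ConversationBufferWindowMemory", yet the new side still calls ConversationBufferWindowMemory(...), which would raise a NameError at import time (plausibly the Space's "Runtime error"). A minimal sketch of what the new file appears to need, assuming the classic LangChain memory API:

# Sketch: the import the new file still appears to rely on; the kwargs
# mirror the added lines in the hunk above.
from langchain.memory import ConversationBufferWindowMemory

conversational_memory = ConversationBufferWindowMemory(
    memory_key='chat_history',  # key under which past turns are exposed to the prompt
    k=10,                       # keep only the last 10 exchanges
    return_messages=True        # return Message objects rather than one joined string
)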
@@ -43,14 +41,9 @@ graph = Neo4jGraph(
     password="Z10duoPkKCtENuOukw3eIlvl0xJWKtrVSr-_hGX1LQ4"
 )
 
-
-
-
-
-llm = ChatNVIDIA(
-    base_url="https://integrate.api.nvidia.com/v1",
-    model="meta/llama-3.1-8b-instruct"
-)
+# Setup the Groq model
+groq_api_key = os.getenv('GROQ_API_KEY')
+llm = ChatGroq(groq_api_key=groq_api_key, model_name="Gemma2-9b-It")
 
 # Define entity extraction and retrieval functions
 class Entities(BaseModel):
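The hunk above swaps the NVIDIA-hosted meta/llama-3.1-8b-instruct endpoint for Groq's Gemma2-9b-It. A quick smoke test for the new backend, as a sketch assuming langchain-groq is installed and GROQ_API_KEY is set in the Space's secrets:

# Sketch: verify the Groq-backed LLM responds before wiring it into the chain.
import os
from langchain_groq import ChatGroq

llm = ChatGroq(groq_api_key=os.getenv('GROQ_API_KEY'), model_name="Gemma2-9b-It")
print(llm.invoke("Name one landmark in Birmingham, Alabama.").content)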
@@ -63,7 +56,6 @@ entity_prompt = ChatPromptTemplate.from_messages([
     ("human", "Use the given format to extract information from the following input: {question}"),
 ])
 
-#chat_model = ChatOpenAI(temperature=0, model_name="gpt-4o", api_key=os.environ['OPENAI_API_KEY'])
 entity_chain = entity_prompt | llm.with_structured_output(Entities)
 
 def remove_lucene_chars(input: str) -> str:
@@ -82,9 +74,6 @@ def generate_full_text_query(input: str) -> str:
         full_text_query += f" {words[-1]}~2"
     return full_text_query.strip()
 
-# Setup logging to a file to capture debug information
-logging.basicConfig(filename='neo4j_retrieval.log', level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
-
 def structured_retriever(question: str) -> str:
     result = ""
     entities = entity_chain.invoke({"question": question})
@@ -113,7 +102,7 @@ def retriever_neo4j(question: str):
     logging.debug(f"Structured data: {structured_data}")
     return structured_data
 
-#
+# Condense follow-up questions to standalone
 _template = """Given the following conversation and a follow-up question, rephrase the follow-up question to be a standalone question,
 in its original language.
 Chat History:
@@ -145,78 +134,47 @@ _search_query = RunnableBranch(
     RunnableLambda(lambda x: x["question"]),
 )
 
+# Define the prompt for response generation
+template = """I am a guide for Birmingham, Alabama. I can provide recommendations and insights about the city, including events and activities.
+Ask your question directly, and I'll provide a precise, short, and crisp response in a conversational way without any greeting.
+{context}
+Question: {question}
+Answer:"""
 
-
-#Ask your question directly, and I'll provide a precise and quick,short and crisp response in a conversational way without any Greet.
-#{context}
-#Question: {question}
-#Answer:"""
-
-
-# Define the ChatPromptTemplate
-prompt = ChatPromptTemplate.from_messages(
-    [
-        (
-            "system",
-            "I am a guide for Birmingham, Alabama. I can provide recommendations and insights about the city, including events and activities. Ask your question directly, and I'll provide a precise and quick, short and crisp response in a conversational way without any Greet."
-        ),
-        MessagesPlaceholder(variable_name="chat_history"),
-        ("user", "{input}"),
-        MessagesPlaceholder(variable_name="agent_scratchpad"),
-    ]
-)
+qa_prompt = ChatPromptTemplate.from_template(template)
 
 # Define the chain for Neo4j-based retrieval and response generation
 chain_neo4j = (
     RunnableParallel(
         {
-            "
-            "
-            "agent_scratchpad": retriever_neo4j,  # Use the retriever to get additional context
+            "context": _search_query | retriever_neo4j,
+            "question": RunnablePassthrough(),
         }
     )
-
-    | llm
-    | StrOutputParser()
+    | qa_prompt
+    | llm
+    | StrOutputParser()
 )
 
 # Define the function to get the response
-def get_response(
+def get_response(question):
     try:
-
-        return chain_neo4j.invoke({
-            "input": input_text,  # User's question as 'input'
-            "chat_history": [],  # Replace with actual chat history if available
-            "agent_scratchpad": ""  # Placeholder for any additional data
-        })
+        return chain_neo4j.invoke({"question": question})
     except Exception as e:
+        logging.error(f"Error generating response: {str(e)}")
         return f"Error: {str(e)}"
 
-
-
 # Define the function to clear input and output
 def clear_fields():
-    return [],"",None
+    return [], "", None
 
 # Function to generate audio with Eleven Labs TTS
 def generate_audio_elevenlabs(text):
     XI_API_KEY = os.environ['ELEVENLABS_API']
     VOICE_ID = 'ehbJzYLQFpwbJmGkqbnW'
     tts_url = f"https://api.elevenlabs.io/v1/text-to-speech/{VOICE_ID}/stream"
-    headers = {
-
-        "xi-api-key": XI_API_KEY
-    }
-    data = {
-        "text": str(text),
-        "model_id": "eleven_multilingual_v2",
-        "voice_settings": {
-            "stability": 1.0,
-            "similarity_boost": 0.0,
-            "style": 0.60,
-            "use_speaker_boost": False
-        }
-    }
+    headers = {"Accept": "application/json", "xi-api-key": XI_API_KEY}
+    data = {"text": str(text), "model_id": "eleven_multilingual_v2", "voice_settings": {"stability": 1.0}}
     response = requests.post(tts_url, headers=headers, json=data, stream=True)
     if response.ok:
         with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as f:
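The rebuilt chain above feeds the incoming question through _search_query | retriever_neo4j to produce context, passes the raw input through as question, and pipes both into qa_prompt | llm | StrOutputParser. A sketch of how it is exercised, matching the new get_response:

# Sketch: the invoke payload must carry the "question" key, since both
# branches of the RunnableParallel read it.
answer = chain_neo4j.invoke({"question": "What are some popular events in Birmingham?"})
print(answer)  # a plain string, thanks to StrOutputParser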
@@ -225,175 +183,32 @@ def generate_audio_elevenlabs(text):
                 f.write(chunk)
             audio_path = f.name
         logging.debug(f"Audio saved to {audio_path}")
-        return audio_path  # Return audio path for
+        return audio_path  # Return audio path for playback
     else:
         logging.error(f"Error generating audio: {response.text}")
         return None
 
-# Function to add a user's message to the chat history and clear the input box
-def add_message(history, message):
-    if message.strip():
-        history.append((message, None))  # Add the user's message to the chat history only if it's not empty
-    return history, ""  # Clear the input box
-
-# Define function to generate a streaming response
-def chat_with_bot(messages):
-    user_message = messages[-1][0]  # Get the last user message (input)
-    messages[-1] = (user_message, "")  # Prepare the placeholder for the bot's response
-
-    response = get_response(user_message)
-
-
-
-    # Simulate streaming response by iterating over each character in the response
-    for character in response:
-        messages[-1] = (user_message, messages[-1][1] + character)
-        yield messages  # Stream each character
-        time.sleep(0.05)  # Adjust delay as needed for real-time effect
-
-    yield messages  # Final yield to ensure the full response is displayed
-
-
-# Function to generate audio with Eleven Labs TTS from the last bot response
-def generate_audio_from_last_response(history):
-    # Get the most recent bot response from the chat history
-    if history and len(history) > 0:
-        recent_response = history[-1][1]  # The second item in the tuple is the bot response text
-        if recent_response:
-            return generate_audio_elevenlabs(recent_response)
-    return None
-
-# Define example prompts
-examples = [
-    ["What are some popular events in Birmingham?"],
-    ["Who are the top players of the Crimson Tide?"],
-    ["Where can I find a hamburger?"],
-    ["What are some popular tourist attractions in Birmingham?"],
-    ["What are some good clubs in Birmingham?"],
-    ["Is there a farmer's market or craft fair in Birmingham, Alabama?"],
-    ["Are there any special holiday events or parades in Birmingham, Alabama, during December?"],
-    ["What are the best places to enjoy live music in Birmingham, Alabama?"]
-
-]
-
-# Function to insert the prompt into the textbox when clicked
-def insert_prompt(current_text, prompt):
-    return prompt[0] if prompt else current_text
-
-
-# Define the ASR model with Whisper
-model_id = 'openai/whisper-large-v3'
-device = "cuda:0" if torch.cuda.is_available() else "cpu"
-torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
-model = AutoModelForSpeechSeq2Seq.from_pretrained(model_id, torch_dtype=torch_dtype).to(device)
-processor = AutoProcessor.from_pretrained(model_id)
-
-pipe_asr = pipeline(
-    "automatic-speech-recognition",
-    model=model,
-    tokenizer=processor.tokenizer,
-    feature_extractor=processor.feature_extractor,
-    max_new_tokens=128,
-    chunk_length_s=15,
-    batch_size=16,
-    torch_dtype=torch_dtype,
-    device=device,
-    return_timestamps=True
-)
-
-# Define the function to reset the state after 10 seconds
-def auto_reset_state():
-    time.sleep(5)
-    return None, ""  # Reset the state and clear input text
-
-
-def transcribe_function(stream, new_chunk):
-    try:
-        sr, y = new_chunk[0], new_chunk[1]
-    except TypeError:
-        print(f"Error chunk structure: {type(new_chunk)}, content: {new_chunk}")
-        return stream, "", None
-
-    # Ensure y is not empty and is at least 1-dimensional
-    if y is None or len(y) == 0:
-        return stream, "", None
-
-    y = y.astype(np.float32)
-    max_abs_y = np.max(np.abs(y))
-    if max_abs_y > 0:
-        y = y / max_abs_y
-
-    # Ensure stream is also at least 1-dimensional before concatenation
-    if stream is not None and len(stream) > 0:
-        stream = np.concatenate([stream, y])
-    else:
-        stream = y
-
-    # Process the audio data for transcription
-    result = pipe_asr({"array": stream, "sampling_rate": sr}, return_timestamps=False)
-    full_text = result.get("text", "")
-
-    # Start a thread to reset the state after 10 seconds
-    threading.Thread(target=auto_reset_state).start()
-
-    return stream, full_text, full_text
-
-
-
-# Define the function to clear the state and input text
-def clear_transcription_state():
-    return None, ""
-
-
-
 # Create the Gradio Blocks interface
 with gr.Blocks(theme="rawrsor1/Everforest") as demo:
     chatbot = gr.Chatbot([], elem_id="RADAR", bubble_full_width=False)
     with gr.Row():
         with gr.Column():
-
-            audio_input = gr.Audio(sources=["microphone"],streaming=True,type='numpy',every=0.1,label="Speak to Ask")
-
-
-
+            question_input = gr.Textbox(label="Ask a Question", placeholder="Type your question here...")
         with gr.Column():
-            audio_output = gr.Audio(label="Audio", type="filepath",autoplay=True,interactive=False)
-
+            audio_output = gr.Audio(label="Audio", type="filepath", autoplay=True, interactive=False)
+
     with gr.Row():
         with gr.Column():
             get_response_btn = gr.Button("Get Response")
-        with gr.Column():
-            clear_state_btn = gr.Button("Clear State")
         with gr.Column():
             generate_audio_btn = gr.Button("Generate Audio")
         with gr.Column():
+            clear_state_btn = gr.Button("Clear State")
-
-
-    with gr.Row():
-        with gr.Column():
-            gr.Markdown("<h1 style='color: red;'>Example Prompts</h1>", elem_id="Example-Prompts")
-            gr.Examples(examples=examples, fn=insert_prompt, inputs=question_input, outputs=question_input,api_name="api_insert_example")
-
-    # Define interactions
-    # Define interactions for clicking the button
-    get_response_btn.click(fn=add_message, inputs=[chatbot, question_input], outputs=[chatbot, question_input],api_name="api_add_message_on_button_click")\
-        .then(fn=chat_with_bot, inputs=[chatbot], outputs=chatbot,api_name="api_get response_on_button")
-    # Define interaction for hitting the Enter key
-    question_input.submit(fn=add_message, inputs=[chatbot, question_input], outputs=[chatbot, question_input],api_name="api_add_message_on _enter")\
-        .then(fn=chat_with_bot, inputs=[chatbot], outputs=chatbot,api_name="api_get response_on_enter")
-
-    # Speech-to-Text functionality
-    state = gr.State()
-    audio_input.stream(transcribe_function, inputs=[state, audio_input], outputs=[state, question_input],api_name="api_voice_to_text")
-
-
-    generate_audio_btn.click(fn=generate_audio_from_last_response, inputs=chatbot, outputs=audio_output,api_name="api_generate_text_to_audio")
-    clean_btn.click(fn=clear_fields, inputs=[], outputs=[chatbot, question_input, audio_output],api_name="api_clear_textbox")
-
-
-    # Clear state interaction
-    clear_state_btn.click(fn=clear_transcription_state, outputs=[question_input, state],api_name="api_clean_state_transcription")
 
+    # Define interactions for buttons
+    get_response_btn.click(fn=get_response, inputs=question_input, outputs=chatbot)
+    generate_audio_btn.click(fn=generate_audio_elevenlabs, inputs=chatbot, outputs=audio_output)
+    clear_state_btn.click(fn=clear_fields, outputs=[chatbot, question_input, audio_output])
 
 # Launch the Gradio interface
-demo.launch(show_error=True)
+demo.launch(show_error=True)
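One caveat on the simplified wiring in the final hunk: gr.Chatbot expects a list of (user, bot) pairs, but get_response returns a bare string, and generate_audio_elevenlabs is handed the whole chat history rather than text. A sketch of thin adapters that would keep the three-button layout working; respond and last_reply_to_audio are hypothetical names, only get_response and generate_audio_elevenlabs come from the file:

# Sketch with hypothetical helper names, assuming the tuple-style Chatbot API.
def respond(question, history):
    history = history or []
    history.append((question, get_response(question)))  # one (user, bot) pair
    return history

def last_reply_to_audio(history):
    if history and history[-1][1]:
        return generate_audio_elevenlabs(history[-1][1])  # speak the last bot turn
    return None

# get_response_btn.click(fn=respond, inputs=[question_input, chatbot], outputs=chatbot)
# generate_audio_btn.click(fn=last_reply_to_audio, inputs=chatbot, outputs=audio_output)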