Build error
Update app.py
app.py CHANGED
@@ -40,10 +40,7 @@ gpt_vectorstore = PineconeVectorStore(index_name="radardata11122024", embedding=
 gpt_retriever = gpt_vectorstore.as_retriever(search_kwargs={'k': 2})
 
 
-
-    search_kwargs = {'k': k}
-    results = await retriever.as_retriever(search_kwargs=search_kwargs)(query)
-    return results
+
 
 # Pinecone setup
 from pinecone import Pinecone
@@ -139,15 +136,19 @@ def generate_audio_elevenlabs(text):
 import time
 
 # Main function to handle mode selection with character-by-character streaming
-
+def handle_mode_selection(mode, chat_history, question):
     if mode == "Normal Chatbot":
-        chat_history.append((question, ""))
-
+        chat_history.append((question, ""))  # Append user question with an empty response initially
+
+        # Get response from Pinecone using the qa_chain
+        response = qa_chain({"query": question, "context": ""})
         response_text = response['result']
+
+        # Stream each character in the response text to the chat history
         for i, char in enumerate(response_text):
-            chat_history[-1] = (question, chat_history[-1][1] + char)
-            yield chat_history, "", None
-
+            chat_history[-1] = (question, chat_history[-1][1] + char)  # Update the last message
+            yield chat_history, "", None  # Yield updated chat history
+            time.sleep(0.05)  # Small delay to simulate streaming
 
     elif mode == "Voice to Voice Conversation":
         response_text = qa_chain({"query": question, "context": ""})['result']
@@ -291,7 +292,7 @@ with gr.Blocks(theme="rawrsor1/Everforest") as demo:
 
     # Define interactions for the Get Response button
     get_response_btn.click(
-        fn=
+        fn=handle_mode_selection,
        inputs=[mode_selection, chatbot, question_input],
        outputs=[chatbot, question_input, audio_output],
        api_name="api_add_message_on_button_click"
@@ -301,7 +302,7 @@ with gr.Blocks(theme="rawrsor1/Everforest") as demo:
 
 
    question_input.submit(
-        fn=
+        fn=handle_mode_selection,
        inputs=[mode_selection, chatbot, question_input],
        outputs=[chatbot, question_input, audio_output],
        api_name="api_add_message_on_enter"
@@ -309,7 +310,7 @@ with gr.Blocks(theme="rawrsor1/Everforest") as demo:
 
 
    submit_voice_btn.click(
-        fn=
+        fn=handle_mode_selection,
        inputs=[mode_selection, chatbot, question_input],
        outputs=[chatbot, question_input, audio_output],
        api_name="api_voice_to_voice_translation"
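
The new handle_mode_selection is a Gradio generator: each yield pushes the partially built chat history back to the chatbot component, which is what produces the character-by-character streaming, and the three wired events simply point their fn= at that generator. Below is a minimal, self-contained sketch of the same pattern; fake_qa_chain is a stand-in (an assumption) for the Pinecone-backed qa_chain in app.py, the theme and the voice branch are omitted, and the component constructors are guesses, with only the component names and mode choices mirroring the diff.

import time
import gradio as gr

def fake_qa_chain(inputs):
    # Stand-in (assumption) for the Pinecone/LangChain qa_chain used in app.py.
    return {"result": f"You asked: {inputs['query']}"}

def handle_mode_selection(mode, chat_history, question):
    # Generator function: each yield pushes the partial answer to the Chatbot,
    # which is how Gradio produces the character-by-character streaming effect.
    if mode == "Normal Chatbot":
        chat_history.append((question, ""))  # empty answer to fill in as characters arrive
        response_text = fake_qa_chain({"query": question, "context": ""})["result"]
        for char in response_text:
            chat_history[-1] = (question, chat_history[-1][1] + char)
            yield chat_history, "", None     # updated chatbot, cleared textbox, no audio
            time.sleep(0.05)                 # small delay so the streaming is visible

with gr.Blocks() as demo:
    mode_selection = gr.Radio(["Normal Chatbot", "Voice to Voice Conversation"],
                              value="Normal Chatbot", label="Mode")
    chatbot = gr.Chatbot()
    question_input = gr.Textbox(label="Question")
    audio_output = gr.Audio(label="Audio response")
    get_response_btn = gr.Button("Get Response")

    # Wiring the generator to the event is all that is needed; Gradio detects the
    # yields and streams each one to the outputs listed here.
    get_response_btn.click(
        fn=handle_mode_selection,
        inputs=[mode_selection, chatbot, question_input],
        outputs=[chatbot, question_input, audio_output],
    )

demo.launch()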