Pijush2023 committed
Commit de0f6aa · verified · 1 parent: 17d0825

Update app.py

Files changed (1)
  1. app.py +10 -15
app.py CHANGED
@@ -26,6 +26,7 @@ import threading
 from langchain_openai import OpenAIEmbeddings
 from langchain_pinecone import PineconeVectorStore
 from langchain.chains import RetrievalQA
+import asyncio
 
 embeddings = OpenAIEmbeddings(api_key=os.environ['OPENAI_API_KEY'])
 def initialize_gpt_model():
@@ -36,7 +37,7 @@ gpt_model = initialize_gpt_model()
 
 gpt_embeddings = OpenAIEmbeddings(api_key=os.environ['OPENAI_API_KEY'])
 gpt_vectorstore = PineconeVectorStore(index_name="radardata11122024", embedding=gpt_embeddings)
-gpt_retriever = gpt_vectorstore.as_retriever(search_kwargs={'k': 5})
+gpt_retriever = gpt_vectorstore.as_retriever(search_kwargs={'k': 2})
 
 
 
@@ -47,7 +48,7 @@ pc = Pinecone(api_key=os.environ['PINECONE_API_KEY'])
 
 index_name ="radardata11122024"
 vectorstore = PineconeVectorStore(index_name=index_name, embedding=embeddings)
-retriever = vectorstore.as_retriever(search_kwargs={'k': 5})
+retriever = vectorstore.as_retriever(search_kwargs={'k': 2})
 
 chat_model = ChatOpenAI(api_key=os.environ['OPENAI_API_KEY'], temperature=0, model='gpt-4o')
 
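Note on the hunks above: both retrievers were tightened from k=5 to k=2, so each query now pulls only the two most similar chunks from the Pinecone index: a smaller prompt and lower token cost for GPT-4o, at the price of some recall. A quick way to sanity-check the new setting (a minimal sketch; the query string is illustrative, and get_relevant_documents is the stock LangChain retriever call):

# Minimal sanity check for the smaller k -- the query string is illustrative.
docs = retriever.get_relevant_documents("sample question")
print(len(docs))                # at most 2 now (was up to 5 before this commit)
for d in docs:
    print(d.page_content[:80])  # preview each retrieved chunk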
@@ -134,19 +135,15 @@ def generate_audio_elevenlabs(text):
 import time
 
 # Main function to handle mode selection with character-by-character streaming
-def handle_mode_selection(mode, chat_history, question):
+async def async_handle_mode_selection(mode, chat_history, question):
     if mode == "Normal Chatbot":
-        chat_history.append((question, ""))  # Append user question with an empty response initially
-
-        # Get response from Pinecone using the qa_chain
-        response = qa_chain({"query": question, "context": ""})
+        chat_history.append((question, ""))
+        response = await async_get_retriever_result(question, retriever)
         response_text = response['result']
-
-        # Stream each character in the response text to the chat history
         for i, char in enumerate(response_text):
-            chat_history[-1] = (question, chat_history[-1][1] + char)  # Update the last message
-            yield chat_history, "", None  # Yield updated chat history
-            time.sleep(0.05)  # Small delay to simulate streaming
+            chat_history[-1] = (question, chat_history[-1][1] + char)
+            yield chat_history, "", None
+            await asyncio.sleep(0.05)
 
     elif mode == "Voice to Voice Conversation":
         response_text = qa_chain({"query": question, "context": ""})['result']
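The streaming branch now awaits async_get_retriever_result, a helper this diff calls but never defines, and swaps time.sleep for await asyncio.sleep so the pause between characters no longer blocks the event loop. A minimal sketch of what such a helper could look like, assuming it simply offloads the blocking qa_chain call to a worker thread (the body below is an assumption, not the actual definition in app.py):

import asyncio

# Hypothetical helper -- the real definition sits outside this diff.
# Assumption: it runs the synchronous qa_chain call on a worker thread
# so awaiting it keeps the event loop free for the character stream.
async def async_get_retriever_result(question, retriever):
    # qa_chain is already built over this retriever; the second argument
    # is passed through only to match the call site above.
    return await asyncio.to_thread(qa_chain, {"query": question, "context": ""})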
@@ -188,9 +185,7 @@ def generate_audio_from_last_response(history):
         return generate_audio_elevenlabs(recent_response)
     return None
 
-# Function to insert the prompt into the textbox when clicked
-#def insert_prompt(current_text, prompt):
-    #return prompt[0] if prompt else current_text
+
 
 
 # Define the ASR model with Whisper
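Since async_handle_mode_selection is now an async generator (it both awaits and yields), recent Gradio versions can bind it directly as an event handler and re-render the outputs after every yield. The wiring below is hypothetical, as the actual component names in app.py are not shown in this diff, but each of the three yielded values (updated history, cleared textbox, audio placeholder) would map to one output component:

# Hypothetical wiring -- component names are illustrative, not from app.py.
submit_btn.click(
    fn=async_handle_mode_selection,
    inputs=[mode_selector, chatbot, question_box],
    outputs=[chatbot, question_box, audio_output],
)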