Update app.py
app.py CHANGED
@@ -40,7 +40,8 @@ warnings.filterwarnings("ignore", module="langchain")
 # Initialize and set the cache
 set_llm_cache(InMemoryCache())
 
-
+#model='gpt-3.5-turbo'
+model='gpt-4o-mini'
 
 #index_name ="radardata11122024"
 #index_name="radarclintcountrymusic11152024"
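Note: this hunk lifts the model name into a module-level `model` constant (with the GPT-3.5 option left commented out), and the file keeps an in-process LLM cache enabled just above. A minimal standalone sketch of that combination, assuming recent langchain-core / langchain-openai import paths (the app itself may import InMemoryCache from an older langchain module):

from langchain_core.caches import InMemoryCache
from langchain_core.globals import set_llm_cache
from langchain_openai import ChatOpenAI

# With an in-process cache set, a repeated identical prompt is answered
# from memory instead of a second API call.
set_llm_cache(InMemoryCache())

model = 'gpt-4o-mini'
llm = ChatOpenAI(temperature=0, model=model)  # reads OPENAI_API_KEY from the env

print(llm.invoke("Name one Birmingham landmark.").content)  # first call hits the API
print(llm.invoke("Name one Birmingham landmark.").content)  # served from the cache

With temperature=0 and a stable prompt, the second call returns the identical response without another round trip.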
@@ -48,14 +49,14 @@ index_name="radarmasterdataset11252024"
 
 embeddings = OpenAIEmbeddings(api_key=os.environ['OPENAI_API_KEY'])
 def initialize_gpt_model():
-    return ChatOpenAI(api_key=os.environ['OPENAI_API_KEY'], temperature=0, model=…
+    return ChatOpenAI(api_key=os.environ['OPENAI_API_KEY'], temperature=0, model=model)
 
 gpt_model = initialize_gpt_model()
 
 
 gpt_embeddings = OpenAIEmbeddings(api_key=os.environ['OPENAI_API_KEY'])
 gpt_vectorstore = PineconeVectorStore(index_name=index_name, embedding=gpt_embeddings)
-gpt_retriever = gpt_vectorstore.as_retriever(search_kwargs={'k': …
+gpt_retriever = gpt_vectorstore.as_retriever(search_kwargs={'k': 1})
 
 
 
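Note: the retriever's k is pinned to 1 here, so each query feeds only the single closest chunk into the prompt. A hypothetical standalone sketch of what that setting controls (the index name and query are illustrative, not from the app; assumes OPENAI_API_KEY and PINECONE_API_KEY are set in the environment):

from langchain_openai import OpenAIEmbeddings
from langchain_pinecone import PineconeVectorStore

# as_retriever forwards search_kwargs to the vector store's similarity
# search, so k=1 caps retrieval at a single document per query.
vectorstore = PineconeVectorStore(
    index_name="example-index",   # hypothetical index name
    embedding=OpenAIEmbeddings(),
)
retriever = vectorstore.as_retriever(search_kwargs={'k': 1})

docs = retriever.invoke("restaurants near Railroad Park")
print(docs[0].page_content if docs else "no match")

A larger k gives the model more context at the cost of longer prompts; k=1 keeps calls fast and cheap but depends entirely on the top match being the right one.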
@@ -67,16 +68,18 @@ pc = Pinecone(api_key=os.environ['PINECONE_API_KEY'])
 
 
 vectorstore = PineconeVectorStore(index_name=index_name, embedding=embeddings)
-retriever = vectorstore.as_retriever(search_kwargs={'k': …
+retriever = vectorstore.as_retriever(search_kwargs={'k': 1})
 
-chat_model = ChatOpenAI(api_key=os.environ['OPENAI_API_KEY'], temperature=0, model=…
+chat_model = ChatOpenAI(api_key=os.environ['OPENAI_API_KEY'], temperature=0, model=model)
 
 #code for history
-
-
-
-
-
+memory = ConversationBufferWindowMemory(k=3)
+conversation=ConversationChain(
+    llm=chat_model,
+    memory=memory,
+    verbose=True
+)
+
 
 # template =f"""Hello there! As your friendly and knowledgeable guide here in Birmingham, Alabama.Give the short ,precise,crisp and straight-foreward response of maximum 2 sentences and dont greet.
 # {{context}}
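Note: the new ConversationBufferWindowMemory(k=3) keeps only the three most recent exchanges, so older turns silently fall out of the context window. A standalone sketch of that behavior (assumes the classic langchain package; these memory classes are deprecated in newer releases in favor of LangGraph persistence):

from langchain.memory import ConversationBufferWindowMemory

memory = ConversationBufferWindowMemory(k=3)
for i in range(5):
    memory.save_context({"input": f"question {i}"}, {"output": f"answer {i}"})

# Only exchanges 2-4 survive; 0 and 1 have been dropped from the window.
print(memory.load_memory_variables({})["history"])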
@@ -111,7 +114,6 @@ conversational_memory = ConversationBufferWindowMemory(
 
 template = f"""You’re Annie, a charming media personality created by Amit Lamba, guiding people around Birmingham, Alabama. Like Rick Steves or Anthony Bourdain, you’re here to give friendly, concise, and accurate advice with a dash of humor.
 Keep answers relevant, upbeat, and engaging, spotlighting partner businesses whenever possible. Be conversational, like chatting with a clever friend, and encourage follow-up questions to create a smooth flow. Make users smile and deliver a delightful, informative experience—like a perfect dessert after a great meal.
-
 {{context}}
 Question: {{question}}
 Helpful Answer:"""
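Note: the doubled braces in this f-string are deliberate: {{context}} and {{question}} survive f-string interpolation as the literal {context} and {question} placeholders that LangChain fills in at query time. A minimal sketch (prompt text abbreviated; assumes langchain-core):

from langchain_core.prompts import PromptTemplate

# The f-string collapses {{...}} to {...}, leaving real template variables.
template = f"""You're a concise guide.
{{context}}
Question: {{question}}
Helpful Answer:"""

prompt = PromptTemplate(template=template, input_variables=["context", "question"])
print(prompt.format(context="(retrieved docs)", question="Where is Vulcan Park?"))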
@@ -175,20 +177,29 @@ def generate_audio_elevenlabs(text):
 
 import time
 
-# Main function to handle mode selection with character-by-character streaming
 def handle_mode_selection(mode, chat_history, question):
     if mode == "Normal Chatbot":
-
+        # Use memory to store history
+        memory.save_context({"input": question}, {"output": ""})
+        chat_history.append((question, ""))  # Add user's question
 
-        # Get…
-
+        # Get the context from memory
+        context = memory.load_memory_variables({}).get("history", "")
+
+        # Use QA chain to get the response
+        response = qa_chain.invoke({"query": question, "context": context})
         response_text = response['result']
 
-        #…
+        # Update memory with the bot's response
+        memory.save_context({"input": question}, {"output": response_text})
+
+        # Stream each character in the response text
         for i, char in enumerate(response_text):
-            chat_history[-1] = (question, chat_history[-1][1] + char)
-            yield chat_history, "", None
-            time.sleep(0.05) #…
+            chat_history[-1] = (question, chat_history[-1][1] + char)
+            yield chat_history, "", None
+            time.sleep(0.05)  # Simulate streaming
+
+        yield chat_history, "", None
 
     elif mode == "Voice to Voice Conversation":
         response_text = qa_chain({"query": question, "context": ""})['result']
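Note: because handle_mode_selection yields the growing chat history one character at a time, Gradio renders a typewriter effect in the Chatbot component. A minimal sketch of that pattern in isolation (the echoed reply stands in for the real qa_chain call; the component wiring is illustrative, not the app's exact layout):

import time
import gradio as gr

def stream_reply(chat_history, question):
    response_text = f"You asked: {question}"   # placeholder for the QA chain output
    chat_history.append((question, ""))
    for char in response_text:
        # Re-yield the history with one more character appended each tick.
        chat_history[-1] = (question, chat_history[-1][1] + char)
        yield chat_history, ""                 # updated chat, cleared textbox
        time.sleep(0.05)

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    box = gr.Textbox()
    box.submit(stream_reply, [chatbot, box], [chatbot, box])

demo.launch()

Gradio treats any generator event handler as a stream and re-renders the outputs on every yield, so no extra streaming flag is needed.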
@@ -197,7 +208,6 @@ def handle_mode_selection(mode, chat_history, question):
 
 
 
-
 # Function to add a user's message to the chat history and clear the input box
 def add_message(history, message):
     if message.strip():
@@ -389,4 +399,4 @@ with gr.Blocks(theme="rawrsor1/Everforest") as demo:
 )
 
 # Launch the Gradio interface
-demo.launch(show_error=True)
+demo.launch(show_error=True)