Pijush2023 committed (verified)
Commit 775f179 · Parent(s): 6dd076c

Update app.py
Files changed (1):
  1. app.py (+6 -2)
app.py CHANGED
```diff
@@ -27,12 +27,16 @@ from langchain_openai import OpenAIEmbeddings
 from langchain_pinecone import PineconeVectorStore
 from langchain.chains import RetrievalQA
 import asyncio
+import warnings
 
 from langchain.globals import set_llm_cache
 from langchain_openai import OpenAI
-from langchain.cache import InMemoryCache
+from langchain_community.cache import InMemoryCache
 from langchain.globals import set_llm_cache
 
+# Suppress warnings from LangChain specifically
+warnings.filterwarnings("ignore", module="langchain")
+
 # Initialize and set the cache
 set_llm_cache(InMemoryCache())
 
```
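The first hunk tracks LangChain's 0.1 package split: `InMemoryCache` now lives in `langchain_community.cache`, and importing it from the old `langchain.cache` path raises a `LangChainDeprecationWarning`, which is presumably why the commit also filters warnings from `langchain` modules. A minimal sketch of the behavior this hunk sets up, assuming `langchain-openai` and `langchain-community` are installed and `OPENAI_API_KEY` is set; the model name and prompt are illustrative, not taken from app.py:

```python
import warnings

from langchain_community.cache import InMemoryCache
from langchain.globals import set_llm_cache
from langchain_openai import OpenAI

# Silence LangChain's deprecation chatter; the module argument is a
# regex matched against the start of the emitting module's name.
warnings.filterwarnings("ignore", module="langchain")

# One process-wide cache keyed on (prompt, LLM parameters): repeated
# identical prompts are answered from memory instead of the OpenAI API.
set_llm_cache(InMemoryCache())

llm = OpenAI(model="gpt-3.5-turbo-instruct")  # illustrative model choice
print(llm.invoke("State one fact about Pinecone."))  # network call, cached
print(llm.invoke("State one fact about Pinecone."))  # served from the cache
```

Note that the filter only applies to warnings raised after it is installed, so in app.py it will not catch warnings emitted at import time by the lines above it.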
 
```diff
@@ -176,7 +180,7 @@ def handle_mode_selection(mode, chat_history, question):
     chat_history.append((question, ""))  # Append user question with an empty response initially
 
     # Get response from Pinecone using the qa_chain
-    response = qa_chain({"query": question, "context": ""})
+    response = qa_chain.invoke({"query": question, "context": ""})
     response_text = response['result']
 
     # Stream each character in the response text to the chat history
```
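The second hunk moves the QA call from the deprecated `Chain.__call__` style to the `invoke()` method of LangChain 0.1's Runnable interface. A hypothetical reconstruction of the call site, since `qa_chain` is built elsewhere in app.py; the index name, question, and credential handling are assumptions, not taken from the repository:

```python
from langchain.chains import RetrievalQA
from langchain_openai import OpenAI, OpenAIEmbeddings
from langchain_pinecone import PineconeVectorStore

# Assumes OPENAI_API_KEY and PINECONE_API_KEY are set in the environment;
# the index name is a placeholder, not the app's real index.
vectorstore = PineconeVectorStore(
    index_name="example-index",
    embedding=OpenAIEmbeddings(),
)
qa_chain = RetrievalQA.from_chain_type(
    llm=OpenAI(),
    retriever=vectorstore.as_retriever(),
)

# Deprecated: response = qa_chain({"query": question, "context": ""})
# Current Runnable API; RetrievalQA only consumes the "query" key, so the
# extra "context" key from the diff is passed through unused.
response = qa_chain.invoke({"query": "What does this app do?", "context": ""})
print(response["result"])  # output dict carries the answer under "result"
```

Behavior is unchanged: both call styles run the retrieval-then-answer chain and return a dict, so the existing `response['result']` lookup still works; only the deprecation warning goes away.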
 