demoPOC committed on
Commit
b6a2d35
·
verified ·
1 Parent(s): 3e6d9da

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -42,8 +42,8 @@ from langchain import PromptTemplate
42
 
43
  from langchain.chains import RetrievalQA
44
  #from langchain.memory import ConversationBufferWindowMemory
45
- #from langchain.memory import ConversationBufferMemory
46
- from langchain.memory import ConversationMemory
47
 
48
  from transformers import LlamaTokenizer, AutoTokenizer
49
 
@@ -212,7 +212,7 @@ def getRAGChain(customerName, customerDistrict, custDetailsPresent, vectordb,llm
212
  global memory
213
  #memory = ConversationBufferWindowMemory(k=3, memory_key="history", input_key="question", initial_memory=conversation_history)
214
  #memory = ConversationBufferMemory(k=3, memory_key="history", input_key="query", initial_memory=conversation_history)
215
- memory = ConversationMemory()
216
 
217
  # chain = RetrievalQA.from_chain_type(
218
  # llm=getLLMModel(llmID),
 
42
 
43
  from langchain.chains import RetrievalQA
44
  #from langchain.memory import ConversationBufferWindowMemory
45
+ from langchain.memory import ConversationBufferMemory
46
+
47
 
48
  from transformers import LlamaTokenizer, AutoTokenizer
49
 
 
212
  global memory
213
  #memory = ConversationBufferWindowMemory(k=3, memory_key="history", input_key="question", initial_memory=conversation_history)
214
  #memory = ConversationBufferMemory(k=3, memory_key="history", input_key="query", initial_memory=conversation_history)
215
+ memory = ConversationBufferMemory()
216
 
217
  # chain = RetrievalQA.from_chain_type(
218
  # llm=getLLMModel(llmID),