demoPOC committed on
Commit
3591403
·
verified ·
1 Parent(s): 0a7f858

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -3
app.py CHANGED
@@ -35,7 +35,7 @@ from langchain.docstore.document import Document
35
  from langchain.vectorstores import Chroma
36
  from langchain.text_splitter import RecursiveCharacterTextSplitter
37
 
38
- from langchain.chains import VectorDBQA
39
 
40
  from langchain.document_loaders import UnstructuredFileLoader, TextLoader
41
  from langchain import PromptTemplate
@@ -212,7 +212,7 @@ def getRAGChain(customerName, customerDistrict, custDetailsPresent, vectordb,llm
212
  global memory
213
  #memory = ConversationBufferWindowMemory(k=3, memory_key="history", input_key="question", initial_memory=conversation_history)
214
  #memory = ConversationBufferMemory(k=3, memory_key="history", input_key="query", initial_memory=conversation_history)
215
- memory = ConversationBufferMemory(input_key="query")
216
 
217
  # chain = RetrievalQA.from_chain_type(
218
  # llm=getLLMModel(llmID),
@@ -229,7 +229,20 @@ def getRAGChain(customerName, customerDistrict, custDetailsPresent, vectordb,llm
229
  # input_key="question"),
230
  # }
231
  # )
232
- chain = RetrievalQA.from_chain_type(
 
 
 
 
 
 
 
 
 
 
 
 
 
233
  llm=getLLMModel(llmID),
234
  chain_type='stuff',
235
  retriever=getRetriever(vectordb),
 
35
  from langchain.vectorstores import Chroma
36
  from langchain.text_splitter import RecursiveCharacterTextSplitter
37
 
38
+ from langchain.chains import VectorDBQA, ConversationChain
39
 
40
  from langchain.document_loaders import UnstructuredFileLoader, TextLoader
41
  from langchain import PromptTemplate
 
212
  global memory
213
  #memory = ConversationBufferWindowMemory(k=3, memory_key="history", input_key="question", initial_memory=conversation_history)
214
  #memory = ConversationBufferMemory(k=3, memory_key="history", input_key="query", initial_memory=conversation_history)
215
+ memory = ConversationBufferMemory()
216
 
217
  # chain = RetrievalQA.from_chain_type(
218
  # llm=getLLMModel(llmID),
 
229
  # input_key="question"),
230
  # }
231
  # )
232
+ # chain = RetrievalQA.from_chain_type(
233
+ # llm=getLLMModel(llmID),
234
+ # chain_type='stuff',
235
+ # retriever=getRetriever(vectordb),
236
+ # memory=memory,
237
+ # #retriever=vectordb.as_retriever(),
238
+ # verbose=True,
239
+ # chain_type_kwargs={
240
+ # "verbose": False,
241
+ # "prompt": createPrompt(customerName, customerDistrict, custDetailsPresent),
242
+ # "memory": memory
243
+ # }
244
+ # )
245
+ chain = ConversationChain(
246
  llm=getLLMModel(llmID),
247
  chain_type='stuff',
248
  retriever=getRetriever(vectordb),