Update app.py
app.py CHANGED

@@ -67,16 +67,7 @@ def load_db():
         embedding_function=embedding)
     return vectordb
 
-
-    llm = HuggingFaceHub(repo_id = llm_model,
-        model_kwargs={"temperature": temperature,
-            "max_new_tokens": max_tokens,
-            "top_k": top_k,
-            "load_in_8bit": True})
-    retriever=vector_db.as_retriever()
-    memory = ConversationBufferMemory(memory_key="chat_history", output_key='answer', return_messages=True)
-    qa_chain = ConversationalRetrievalChain.from_llm(llm_model,retriever=retriever,chain_type="stuff",
-        memory=memory,return_source_documents=True,verbose=False,)
+
 
 
 
@@ -154,6 +145,17 @@ def conversation(qa_chain, message, history):
     vector_db, collection_name = initialize_database(list_file_obj)
     #qa_chain =
 
+    # Initialize langchain LLM chain
+    llm = HuggingFaceHub(repo_id = llm_model,
+        model_kwargs={"temperature": temperature,
+            "max_new_tokens": max_tokens,
+            "top_k": top_k,
+            "load_in_8bit": True})
+    retriever=vector_db.as_retriever()
+    memory = ConversationBufferMemory(memory_key="chat_history", output_key='answer', return_messages=True)
+    qa_chain = ConversationalRetrievalChain.from_llm(llm_model,retriever=retriever,chain_type="stuff",
+        memory=memory,return_source_documents=True,verbose=False,)
+
     print('qa chain and vector_db done')
 
 def demo():