Update app.py
app.py
CHANGED
@@ -177,7 +177,7 @@ def demo():
 
         # document = list_file_obj
         vector_db, collection_name = initialize_database(list_file_obj)
-
+        print(len(vector_db))
         # #upload_btn.upload(upload_file, inputs=[upload_btn], outputs=[document])
         # db_btn.click(initialize_database, \
         #     inputs=[document], \
@@ -190,8 +190,15 @@ def demo():
         #     outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], \
         #     queue=False)
 
-        qachain = initialize_LLM(vector_db)
-
+        #qachain = initialize_LLM(vector_db)
+        llm = HuggingFaceHub(repo_id=llm_model, model_kwargs={"temperature": temperature,
+                                                              "max_new_tokens": max_tokens,
+                                                              "top_k": top_k,
+                                                              "load_in_8bit": True})
+        retriever = vector_db.as_retriever()
+        memory = ConversationBufferMemory(memory_key="chat_history", output_key='answer', return_messages=True)
+        qa_chain = ConversationalRetrievalChain.from_llm(llm, retriever=retriever, chain_type="stuff",
+                                                         memory=memory, return_source_documents=True, verbose=False)
         # Chatbot events
         msg.submit(conversation, \
                    inputs=[qa_chain, msg, chatbot], \