Update app.py
app.py
CHANGED
@@ -139,30 +139,33 @@ def conversation(qa_chain, message, history):
     # Append user message and response to chat history
     new_history = history + [(message, response_answer)]
     # return gr.update(value=""), new_history, response_sources[0], response_sources[1]
-    return qa_chain,
+    return qa_chain, new_history, response_source1, response_source1_page, response_source2, response_source2_page, response_source3, response_source3_page
 
 #document = os.listdir(list_file_obj)
-vector_db, collection_name = initialize_database(list_file_obj)
 #qa_chain =
 
-# Initialize langchain LLM chain
-llm = HuggingFaceHub(repo_id=llm_model,
-                     model_kwargs={"temperature": temperature,
-                                   "max_new_tokens": max_tokens,
-                                   "top_k": top_k,
-                                   "load_in_8bit": True})
-retriever = vector_db.as_retriever()
-memory = ConversationBufferMemory(memory_key="chat_history", output_key='answer', return_messages=True)
-qa_chain = ConversationalRetrievalChain.from_llm(llm, retriever=retriever, chain_type="stuff",
-                                                 memory=memory, return_source_documents=True, verbose=False)
 
-
+
 
 def demo():
     with gr.Blocks(theme='base') as demo:
-
-
-
+        vector_db = gr.State()
+        qa_chain = gr.State()
+        collection_name = gr.State()
+
+
+        vector_db, collection_name = initialize_database(list_file_obj)
+
+        # Initialize langchain LLM chain
+        llm = HuggingFaceHub(repo_id=llm_model,
+                             model_kwargs={"temperature": temperature,
+                                           "max_new_tokens": max_tokens,
+                                           "top_k": top_k,
+                                           "load_in_8bit": True})
+        retriever = vector_db.as_retriever()
+        memory = ConversationBufferMemory(memory_key="chat_history", output_key='answer', return_messages=True)
+        qa_chain = ConversationalRetrievalChain.from_llm(llm, retriever=retriever, chain_type="stuff",
+                                                         memory=memory, return_source_documents=True, verbose=False)
 
         chatbot = gr.Chatbot(height=300)
         with gr.Accordion('References', open=True):
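Taken together, the commit changes `conversation` to return the chain state, the updated history, and three source references with their page numbers instead of a bare one-element tuple, and it moves the vector-database and LLM-chain setup from module level into `demo()`, where new `gr.State()` holders keep `vector_db`, `qa_chain`, and `collection_name` alive across Gradio callbacks. Below is a minimal sketch, not part of this commit, of how such State holders and the expanded return tuple are typically wired up in Gradio. The component names (`msg`, `source1`, `source1_page`, ...) are illustrative assumptions rather than identifiers from this repository, `conversation` is taken to have the signature shown in the hunk header, and values such as `list_file_obj`, `llm_model`, and `temperature` are assumed to be bound elsewhere in the file.

import gradio as gr

def demo():
    with gr.Blocks(theme='base') as demo:
        # Session-scoped holders, as introduced by this commit
        vector_db = gr.State()
        qa_chain = gr.State()
        collection_name = gr.State()

        chatbot = gr.Chatbot(height=300)
        with gr.Accordion('References', open=True):
            # Illustrative components; the real app defines its own
            source1 = gr.Textbox(label="Reference 1", lines=2)
            source1_page = gr.Number(label="Page")
            source2 = gr.Textbox(label="Reference 2", lines=2)
            source2_page = gr.Number(label="Page")
            source3 = gr.Textbox(label="Reference 3", lines=2)
            source3_page = gr.Number(label="Page")
        msg = gr.Textbox(label="Ask a question")

        # Each output slot consumes one element of conversation's return
        # tuple, in order; feeding qa_chain back into its own State keeps
        # the chain available for the next turn.
        msg.submit(conversation,
                   inputs=[qa_chain, msg, chatbot],
                   outputs=[qa_chain, chatbot,
                            source1, source1_page,
                            source2, source2_page,
                            source3, source3_page])
    demo.launch()

Returning `qa_chain` into its own State is what lets the `ConversationBufferMemory` held inside the chain accumulate turns, rather than the chain being rebuilt from scratch on every submit.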