Update app.py
app.py
CHANGED
@@ -209,31 +209,36 @@ def format_chat_history(message, chat_history):
     return formatted_chat_history
 
 
-def conversation(
+def conversation(vector_db, message, history):
+    # Initialize ChatOpenAI with your desired settings
+    chat_open_ai = ChatOpenAI(temperature=0.9, model_name="gpt-3.5-turbo")
+    # Assuming vector_db is already initialized correctly
+    pdf_ga = ChatVectorDBChain.from_llm(chat_open_ai, vector_db, return_source_documents=True)
+
+    # Format the chat history for input to the model
     formatted_chat_history = format_chat_history(message, history)
-
-
+
     # Generate response using QA chain
-
-    response_answer =
-    if
+    result = pdf_ga({"question": message, "chat_history": formatted_chat_history})
+    response_answer = result["answer"]
+    if "Helpful Answer:" in response_answer:
         response_answer = response_answer.split("Helpful Answer:")[-1]
-
-
-
-
-
-
-
-
-
-
-
+
+    response_sources = result["source_documents"]
+    response_source1 = response_sources[0].page_content.strip() if len(response_sources) > 0 else "No source available"
+    response_source2 = response_sources[1].page_content.strip() if len(response_sources) > 1 else "No source available"
+    response_source3 = response_sources[2].page_content.strip() if len(response_sources) > 2 else "No source available"
+
+    # Langchain sources are zero-based; adjust pages if sources are available
+    response_source1_page = response_sources[0].metadata["page"] + 1 if len(response_sources) > 0 else "N/A"
+    response_source2_page = response_sources[1].metadata["page"] + 1 if len(response_sources) > 1 else "N/A"
+    response_source3_page = response_sources[2].metadata["page"] + 1 if len(response_sources) > 2 else "N/A"
+
     # Append user message and response to chat history
     new_history = history + [(message, response_answer)]
-    # return gr.update(value=""), new_history, response_sources[0], response_sources[1]
-    return qa_chain, gr.update(value=""), new_history, response_source1, response_source1_page, response_source2, response_source2_page, response_source3, response_source3_page
 
+    return gr.update(value=""), new_history, response_source1, response_source1_page, response_source2, response_source2_page, response_source3, response_source3_page
+
 
 def upload_file(file_obj):
     list_file_path = []
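
The updated conversation() assumes that ChatOpenAI, ChatVectorDBChain, and a populated vector_db are already available in app.py's scope. A minimal sketch of that setup, assuming OpenAI embeddings and a FAISS store with per-chunk page metadata; the Space's actual vector store construction may differ:

from langchain.chat_models import ChatOpenAI
from langchain.chains import ChatVectorDBChain
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS

# Hypothetical stand-in for the PDF text that app.py loads and splits elsewhere.
embeddings = OpenAIEmbeddings()
vector_db = FAISS.from_texts(
    ["Example page text from an uploaded PDF."],
    embeddings,
    metadatas=[{"page": 0}],  # conversation() reads metadata["page"], so supply it
)

chat_open_ai = ChatOpenAI(temperature=0.9, model_name="gpt-3.5-turbo")
pdf_ga = ChatVectorDBChain.from_llm(chat_open_ai, vector_db, return_source_documents=True)

# The chain takes a question plus formatted history and returns the answer
# together with the source documents it retrieved.
result = pdf_ga({"question": "What is the document about?", "chat_history": []})
print(result["answer"])
print(result["source_documents"][0].metadata["page"])

Note that ChatVectorDBChain was later deprecated in LangChain in favor of ConversationalRetrievalChain, which takes a retriever (e.g. vector_db.as_retriever()) rather than a raw vector store.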
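
The eight-value return (a gr.update that clears the input box, the new history, and three source/page pairs) implies the Gradio layout wires conversation() to one textbox, one chatbot, and six output components. A sketch of that wiring; every component name below is illustrative, not taken from the Space's actual Blocks layout:

import gradio as gr

with gr.Blocks() as demo:
    vector_db = gr.State()  # populated by the PDF upload / processing step
    chatbot = gr.Chatbot()
    msg = gr.Textbox(label="Ask a question about your document")
    src1 = gr.Textbox(label="Source 1")
    page1 = gr.Textbox(label="Page")  # Textbox rather than Number, since the page may be "N/A"
    src2 = gr.Textbox(label="Source 2")
    page2 = gr.Textbox(label="Page")
    src3 = gr.Textbox(label="Source 3")
    page3 = gr.Textbox(label="Page")

    # conversation() is the function added in the diff above: it clears the
    # input box, extends the chat history, and fills the source displays.
    msg.submit(
        conversation,
        inputs=[vector_db, msg, chatbot],
        outputs=[msg, chatbot, src1, page1, src2, page2, src3, page3],
    )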