barghavani committed on
Commit 60b1010 · verified · 1 Parent(s): c132dd6

Update app.py

Files changed (1)
app.py +23 -18
app.py CHANGED
@@ -209,31 +209,36 @@ def format_chat_history(message, chat_history):
     return formatted_chat_history
 
 
-def conversation(qa_chain, message, history):
+def conversation(vector_db, message, history):
+    # Initialize ChatOpenAI with your desired settings
+    chat_open_ai = ChatOpenAI(temperature=0.9, model_name="gpt-3.5-turbo")
+    # Assuming vector_db is already initialized correctly
+    pdf_ga = ChatVectorDBChain.from_llm(chat_open_ai, vector_db, return_source_documents=True)
+
+    # Format the chat history for input to the model
     formatted_chat_history = format_chat_history(message, history)
-    #print("formatted_chat_history",formatted_chat_history)
-
+
     # Generate response using QA chain
-    response = qa_chain({"question": message, "chat_history": formatted_chat_history})
-    response_answer = response["answer"]
-    if response_answer.find("Helpful Answer:") != -1:
+    result = pdf_ga({"question": message, "chat_history": formatted_chat_history})
+    response_answer = result["answer"]
+    if "Helpful Answer:" in response_answer:
         response_answer = response_answer.split("Helpful Answer:")[-1]
-    response_sources = response["source_documents"]
-    response_source1 = response_sources[0].page_content.strip()
-    response_source2 = response_sources[1].page_content.strip()
-    response_source3 = response_sources[2].page_content.strip()
-    # Langchain sources are zero-based
-    response_source1_page = response_sources[0].metadata["page"] + 1
-    response_source2_page = response_sources[1].metadata["page"] + 1
-    response_source3_page = response_sources[2].metadata["page"] + 1
-    # print ('chat response: ', response_answer)
-    # print('DB source', response_sources)
+
+    response_sources = result["source_documents"]
+    response_source1 = response_sources[0].page_content.strip() if len(response_sources) > 0 else "No source available"
+    response_source2 = response_sources[1].page_content.strip() if len(response_sources) > 1 else "No source available"
+    response_source3 = response_sources[2].page_content.strip() if len(response_sources) > 2 else "No source available"
+
+    # Langchain sources are zero-based; adjust pages if sources are available
+    response_source1_page = response_sources[0].metadata["page"] + 1 if len(response_sources) > 0 else "N/A"
+    response_source2_page = response_sources[1].metadata["page"] + 1 if len(response_sources) > 1 else "N/A"
+    response_source3_page = response_sources[2].metadata["page"] + 1 if len(response_sources) > 2 else "N/A"
 
     # Append user message and response to chat history
     new_history = history + [(message, response_answer)]
-    # return gr.update(value=""), new_history, response_sources[0], response_sources[1]
-    return qa_chain, gr.update(value=""), new_history, response_source1, response_source1_page, response_source2, response_source2_page, response_source3, response_source3_page
 
+    return gr.update(value=""), new_history, response_source1, response_source1_page, response_source2, response_source2_page, response_source3, response_source3_page
+
 
 def upload_file(file_obj):
     list_file_path = []
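
Review note: the new signature means app.py must import ChatOpenAI and ChatVectorDBChain, and the chain (including the underlying OpenAI client) is now rebuilt on every message. ChatVectorDBChain is also deprecated in LangChain in favor of ConversationalRetrievalChain. Below is a minimal sketch, not part of this commit, of building the chain once with the non-deprecated API; it assumes the classic langchain package layout and a vector store exposing as_retriever(), and build_pdf_qa_chain is a hypothetical helper name.

# Hedged sketch, not from this commit: construct the chain once and reuse it,
# using ConversationalRetrievalChain (ChatVectorDBChain's recommended replacement).
from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI


def build_pdf_qa_chain(vector_db):
    """Hypothetical helper: build the QA chain once instead of per message."""
    llm = ChatOpenAI(temperature=0.9, model_name="gpt-3.5-turbo")
    return ConversationalRetrievalChain.from_llm(
        llm,
        retriever=vector_db.as_retriever(),  # assumes a LangChain vector store
        return_source_documents=True,
    )

Built this way, the chain could be kept in a gr.State (as the old qa_chain argument was) so conversation() avoids re-instantiating the model client per message; the call shape chain({"question": ..., "chat_history": ...}) and the "answer" / "source_documents" result keys match what the diff already uses.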