Shreyas094 committed
Commit ad3bc0b · verified · 1 Parent(s): 8800142

Update app.py

Files changed (1): app.py (+30 −43)

app.py CHANGED
@@ -18,9 +18,7 @@ from huggingface_hub import InferenceClient
 import inspect
 import logging
 import shutil
-from langchain.chains import ConversationalRetrievalChain
-from langchain.memory import ConversationBufferMemory
-from langchain.llms import HuggingFaceHub
+
 
 # Set up basic configuration for logging
 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
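Note: the dropped imports are the LangChain pieces (ConversationalRetrievalChain, ConversationBufferMemory, HuggingFaceHub) that the rest of this commit stops using; the hunk header shows the file already imports InferenceClient from huggingface_hub, which can stream completions directly. A minimal sketch of that direct-call pattern (model id, token, and prompt are illustrative, not from this commit):

from huggingface_hub import InferenceClient

# Hypothetical model id and token, for illustration only.
client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3", token="hf_...")

# stream=True yields the completion incrementally, so a Gradio generator
# callback can re-yield each partial string to the UI.
for chunk in client.text_generation(
    "Summarize FAISS in one sentence.",
    max_new_tokens=128,
    temperature=0.2,
    stream=True,
):
    print(chunk, end="", flush=True)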
@@ -283,16 +281,18 @@ class CitingSources(BaseModel):
         ...,
         description="List of sources to cite. Should be an URL of the source."
     )
-def chatbot_interface(message, history, use_web_search, model, temperature, num_calls, selected_docs):
+def chatbot_interface(message, history, use_web_search, model, temperature, num_calls):
     if not message.strip():
         return "", history
 
     history = history + [(message, "")]
 
     try:
-        for response in respond(message, history, model, temperature, num_calls, use_web_search, selected_docs):
+        for response in respond(message, history, model, temperature, num_calls, use_web_search):
             history[-1] = (message, response)
             yield history
+    except gr.CancelledError:
+        yield history
     except Exception as e:
         logging.error(f"Unexpected error in chatbot_interface: {str(e)}")
         history[-1] = (message, f"An unexpected error occurred: {str(e)}")
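The reworked chatbot_interface drops the selected_docs parameter and adds a cancellation branch: when the run is cancelled it yields the partial history, so whatever streamed before the user pressed Stop stays in the transcript. For reference, a minimal sketch of the accumulate-and-yield streaming pattern this callback is built on (the echo logic is hypothetical):

import time

def stream_reply(message, history):
    # Yield successively longer versions of the reply; the chat UI repaints
    # on every yield, so the last yielded value survives a cancellation.
    partial = ""
    for word in f"You said: {message}".split():
        partial += word + " "
        time.sleep(0.05)  # stand-in for model latency
        yield partial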
@@ -307,65 +307,52 @@ def retry_last_response(history, use_web_search, model, temperature, num_calls):
 
     return chatbot_interface(last_user_msg, history, use_web_search, model, temperature, num_calls)
 
-def create_conversation_chain(model_name, vector_store, temperature=0.5):
-    # Create a HuggingFaceHub instance
-    llm = HuggingFaceHub(
-        repo_id=model_name,
-        model_kwargs={
-            "temperature": temperature
-        },
-        huggingfacehub_api_token=huggingface_token  # Ensure this variable is defined in your environment
-    )
-
-    memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
-
-    chain = ConversationalRetrievalChain.from_llm(
-        llm=llm,
-        retriever=vector_store.as_retriever(),
-        memory=memory
-    )
-
-    return chain
-
 def respond(message, history, model, temperature, num_calls, use_web_search, selected_docs):
     logging.info(f"User Query: {message}")
     logging.info(f"Model Used: {model}")
     logging.info(f"Search Type: {'Web Search' if use_web_search else 'PDF Search'}")
+
     logging.info(f"Selected Documents: {selected_docs}")
 
     try:
         if use_web_search:
             for main_content, sources in get_response_with_search(message, model, num_calls=num_calls, temperature=temperature):
                 response = f"{main_content}\n\n{sources}"
+                first_line = response.split('\n')[0] if response else ''
+                # logging.info(f"Generated Response (first line): {first_line}")
                 yield response
         else:
             embed = get_embeddings()
             if os.path.exists("faiss_database"):
                 database = FAISS.load_local("faiss_database", embed, allow_dangerous_deserialization=True)
+                retriever = database.as_retriever(search_kwargs={"k": 20})
 
                 # Filter relevant documents based on user selection
-                filtered_docs = [doc for doc in database.docstore._dict.values()
-                                 if isinstance(doc, Document) and doc.metadata.get("source") in selected_docs]
+                all_relevant_docs = retriever.get_relevant_documents(message)
+                relevant_docs = [doc for doc in all_relevant_docs if doc.metadata["source"] in selected_docs]
 
-                if not filtered_docs:
+                if not relevant_docs:
                     yield "No relevant information found in the selected documents. Please try selecting different documents or rephrasing your query."
                     return
-
-                # Create a new FAISS index with only the selected documents
-                filtered_db = FAISS.from_documents(filtered_docs, embed)
-
-                # Create the conversation chain
-                chain = create_conversation_chain(model, filtered_db, temperature)
-
-                # Generate response
-                response = chain({"question": message})
-
-                yield response['answer']
+
+                context_str = "\n".join([doc.page_content for doc in relevant_docs])
             else:
-                logging.warning("No FAISS database found")
+                context_str = "No documents available."
                 yield "No documents available. Please upload PDF documents to answer questions."
                 return
-
+
+            if model == "@cf/meta/llama-3.1-8b-instruct":
+                # Use Cloudflare API
+                for partial_response in get_response_from_cloudflare(prompt="", context=context_str, query=message, num_calls=num_calls, temperature=temperature, search_type="pdf"):
+                    first_line = partial_response.split('\n')[0] if partial_response else ''
+                    # logging.info(f"Generated Response (first line): {first_line}")
+                    yield partial_response
+            else:
+                # Use Hugging Face API
+                for partial_response in get_response_from_pdf(message, model, selected_docs, num_calls=num_calls, temperature=temperature):
+                    first_line = partial_response.split('\n')[0] if partial_response else ''
+                    # logging.info(f"Generated Response (first line): {first_line}")
+                    yield partial_response
     except Exception as e:
         logging.error(f"Error with {model}: {str(e)}")
         if "microsoft/Phi-3-mini-4k-instruct" in model:
@@ -583,7 +570,7 @@ use_web_search = gr.Checkbox(label="Use Web Search", value=True)
 custom_placeholder = "Ask a question (Note: You can toggle between Web Search and PDF Chat in Additional Inputs below)"
 
 demo = gr.ChatInterface(
-    chatbot_interface,
+    respond,
     additional_inputs=[
         gr.Dropdown(choices=MODELS, label="Select Model", value=MODELS[3]),
         gr.Slider(minimum=0.1, maximum=1.0, value=0.2, step=0.1, label="Temperature"),
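The interface now calls respond directly. gr.ChatInterface passes the values of additional_inputs positionally after (message, history), which is why respond's signature lists the same components in the same order. A minimal sketch of that wiring with hypothetical components:

import gradio as gr

def respond(message, history, model, temperature):
    # additional_inputs arrive in declaration order after (message, history)
    yield f"[{model} @ temperature={temperature}] You asked: {message}"

demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Dropdown(choices=["model-a", "model-b"], value="model-a", label="Select Model"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.2, step=0.1, label="Temperature"),
    ],
)

if __name__ == "__main__":
    demo.launch()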
@@ -671,4 +658,4 @@ with demo:
     )
 
 if __name__ == "__main__":
-    demo.launch(share=True)
+    demo.launch(share=True)
 