Shreyas094 committed · verified
Commit 3a68aba · Parent(s): c8a79f9

Update app.py

Files changed (1):
  1. app.py +132 −43

app.py CHANGED
@@ -329,51 +329,138 @@ def get_response_from_duckduckgo(query, model, context, num_calls=1, temperature
 
     yield full_response.strip()
 
-def respond(message, history, model, temperature, num_calls, selected_docs):
-    logging.info(f"User Query: {message}")
-    logging.info(f"Model Used: {model}")
-    logging.info(f"Selected Documents: {selected_docs}")
+class ConversationManager:
+    def __init__(self):
+        self.history = []
+        self.current_context = None
 
-    try:
-        embed = get_embeddings()
-        if os.path.exists("faiss_database"):
-            database = FAISS.load_local("faiss_database", embed, allow_dangerous_deserialization=True)
-            retriever = database.as_retriever(search_kwargs={"k": 20})
-
-            all_relevant_docs = retriever.get_relevant_documents(message)
-            relevant_docs = [doc for doc in all_relevant_docs if doc.metadata["source"] in selected_docs]
-
-            if not relevant_docs:
-                yield "No relevant information found in the selected documents. Please try selecting different documents or rephrasing your query."
-                return
+    def add_interaction(self, query, response):
+        self.history.append((query, response))
+        self.current_context = f"Previous query: {query}\nPrevious response summary: {response[:200]}..."
 
-            context_str = "\n".join([doc.page_content for doc in relevant_docs])
-            logging.info(f"Context length: {len(context_str)}")
-        else:
-            context_str = "No documents available."
-            yield "No documents available. Please upload PDF documents to answer questions."
-            return
+    def get_context(self):
+        return self.current_context
+
+conversation_manager = ConversationManager()
+
+def get_web_search_results(query: str, max_results: int = 10) -> List[Dict[str, str]]:
+    try:
+        results = list(DDGS().text(query, max_results=max_results))
+        if not results:
+            print(f"No results found for query: {query}")
+        return results
+    except Exception as e:
+        print(f"An error occurred during web search: {str(e)}")
+        return [{"error": f"An error occurred during web search: {str(e)}"}]
+
+def rephrase_query(original_query: str, conversation_manager: ConversationManager) -> str:
+    context = conversation_manager.get_context()
+    if context:
+        prompt = f"""You are a highly intelligent conversational chatbot. Your task is to analyze the given context and new query, then decide whether to rephrase the query with or without incorporating the context. Follow these steps:
+
+1. Determine if the new query is a continuation of the previous conversation or an entirely new topic.
+2. If it's a continuation, rephrase the query by incorporating relevant information from the context to make it more specific and contextual.
+3. If it's a new topic, rephrase the query to make it more appropriate for a web search, focusing on clarity and accuracy without using the previous context.
+4. Provide ONLY the rephrased query without any additional explanation or reasoning.
 
-        if model.startswith("duckduckgo/"):
-            # Use DuckDuckGo chat with context
-            for partial_response in get_response_from_duckduckgo(message, model, context_str, num_calls, temperature):
-                yield partial_response
-        elif model == "@cf/meta/llama-3.1-8b-instruct":
-            # Use Cloudflare API
-            for partial_response in get_response_from_cloudflare(prompt="", context=context_str, query=message, num_calls=num_calls, temperature=temperature, search_type="pdf"):
-                yield partial_response
-        else:
-            # Use Hugging Face API
-            for partial_response in get_response_from_pdf(message, model, selected_docs, num_calls=num_calls, temperature=temperature):
-                yield partial_response
+Context: {context}
+
+New query: {original_query}
+
+Rephrased query:"""
+        response = DDGS().chat(prompt, model="llama-3.1-70b")
+        rephrased_query = response.split('\n')[0].strip()
+        return rephrased_query
+    return original_query
+
+def summarize_web_results(query: str, search_results: List[Dict[str, str]], conversation_manager: ConversationManager) -> str:
+    try:
+        context = conversation_manager.get_context()
+        search_context = "\n\n".join([f"Title: {result['title']}\nContent: {result['body']}" for result in search_results])
+
+        prompt = f"""You are a highly intelligent and expert analyst. Your job is to skillfully articulate the web search results about '{query}', considering the context: {context}.
+You have to create a comprehensive news summary FOCUSING on the context provided to you.
+Include key facts, relevant statistics, and expert opinions if available.
+Ensure the article is well-structured with an introduction, main body, and conclusion, IF NECESSARY.
+Address the query in the context of the ongoing conversation IF APPLICABLE.
+Cite sources directly within the generated text, not at the end, integrating URLs where appropriate to support the information provided:
+
+{search_context}
+
+Article:"""
+
+        summary = DDGS().chat(prompt, model="llama-3-70b")
+        return summary
     except Exception as e:
-        logging.error(f"Error with {model}: {str(e)}")
-        if "microsoft/Phi-3-mini-4k-instruct" in model:
-            logging.info("Falling back to Mistral model due to Phi-3 error")
-            fallback_model = "mistralai/Mistral-7B-Instruct-v0.3"
-            yield from respond(message, history, fallback_model, temperature, num_calls, selected_docs)
+        return f"An error occurred during summarization: {str(e)}"
+
+# Modify the existing respond function to handle both PDF and web search
+def respond(message, history, model, temperature, num_calls, selected_docs, use_web_search):
+    logging.info(f"User Query: {message}")
+    logging.info(f"Model Used: {model}")
+    logging.info(f"Selected Documents: {selected_docs}")
+    logging.info(f"Use Web Search: {use_web_search}")
+
+    if use_web_search:
+        original_query = message
+        rephrased_query = rephrase_query(message, conversation_manager)
+        logging.info(f"Original query: {original_query}")
+        logging.info(f"Rephrased query: {rephrased_query}")
+
+        final_summary = ""
+        for _ in range(num_calls):
+            search_results = get_web_search_results(rephrased_query)
+            if not search_results:
+                final_summary += f"No search results found for the query: {rephrased_query}\n\n"
+            elif "error" in search_results[0]:
+                final_summary += search_results[0]["error"] + "\n\n"
+            else:
+                summary = summarize_web_results(rephrased_query, search_results, conversation_manager)
+                final_summary += summary + "\n\n"
+
+        if final_summary:
+            conversation_manager.add_interaction(original_query, final_summary)
+            yield final_summary
         else:
-            yield f"An error occurred with the {model} model: {str(e)}. Please try again or select a different model."
+            yield "Unable to generate a response. Please try a different query."
+    else:
+        # Existing PDF search logic
+        try:
+            embed = get_embeddings()
+            if os.path.exists("faiss_database"):
+                database = FAISS.load_local("faiss_database", embed, allow_dangerous_deserialization=True)
+                retriever = database.as_retriever(search_kwargs={"k": 20})
+
+                all_relevant_docs = retriever.get_relevant_documents(message)
+                relevant_docs = [doc for doc in all_relevant_docs if doc.metadata["source"] in selected_docs]
+
+                if not relevant_docs:
+                    yield "No relevant information found in the selected documents. Please try selecting different documents or rephrasing your query."
+                    return
+
+                context_str = "\n".join([doc.page_content for doc in relevant_docs])
+                logging.info(f"Context length: {len(context_str)}")
+            else:
+                context_str = "No documents available."
+                yield "No documents available. Please upload PDF documents to answer questions."
+                return
+
+            if model == "@cf/meta/llama-3.1-8b-instruct":
+                # Use Cloudflare API
+                for response in get_response_from_cloudflare(prompt="", context=context_str, query=message, num_calls=num_calls, temperature=temperature, search_type="pdf"):
+                    yield response
+            else:
+                # Use Hugging Face API
+                for response in get_response_from_pdf(message, model, selected_docs, num_calls=num_calls, temperature=temperature):
+                    yield response
+        except Exception as e:
+            logging.error(f"Error with {model}: {str(e)}")
+            if "microsoft/Phi-3-mini-4k-instruct" in model:
+                logging.info("Falling back to Mistral model due to Phi-3 error")
+                fallback_model = "mistralai/Mistral-7B-Instruct-v0.3"
+                yield from respond(message, history, fallback_model, temperature, num_calls, selected_docs, use_web_search)
+            else:
+                yield f"An error occurred with the {model} model: {str(e)}. Please try again or select a different model."
 
 logging.basicConfig(level=logging.DEBUG)
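Taken together, the additions form a small conversational web-search loop: rephrase the query against stored context, search, summarize, then store the turn. A standalone sketch of one follow-up turn (not part of the commit; it assumes the definitions above are in scope, the duckduckgo_search package is installed, and the example strings are hypothetical; DDGS().chat support also varies by package version):

# One hypothetical web-search turn, wired the way the new use_web_search branch works.
manager = ConversationManager()
manager.add_interaction(
    "Tell me about the Framework Laptop 16",
    "The Framework Laptop 16 is a modular, repairable 16-inch notebook ...",
)

follow_up = "What about its battery life?"         # the pronoun needs the stored context
rephrased = rephrase_query(follow_up, manager)     # the LLM resolves "its" via get_context()
results = get_web_search_results(rephrased)        # plain DDGS().text() search
if results and "error" not in results[0]:
    summary = summarize_web_results(rephrased, results, manager)
    manager.add_interaction(follow_up, summary)    # becomes context for the next turn
    print(summary)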
@@ -563,16 +650,18 @@ use_web_search = gr.Checkbox(label="Use Web Search", value=True)
 custom_placeholder = "Ask a question (Note: You can toggle between Web Search and PDF Chat in Additional Inputs below)"
 
 # Update the demo interface
+# Update the Gradio interface
 demo = gr.ChatInterface(
     respond,
     additional_inputs=[
         gr.Dropdown(choices=MODELS, label="Select Model", value=MODELS[3]),
         gr.Slider(minimum=0.1, maximum=1.0, value=0.2, step=0.1, label="Temperature"),
         gr.Slider(minimum=1, maximum=5, value=1, step=1, label="Number of API Calls"),
-        document_selector
+        gr.CheckboxGroup(label="Select documents to query", choices=[]),
+        gr.Checkbox(label="Use Web Search", value=False)
     ],
-    title="AI-powered PDF Chat and DuckDuckGo Chat Assistant",
-    description="Chat with your PDFs or use DuckDuckGo chat models to answer questions.",
+    title="AI-powered PDF Chat and Web Search Assistant",
+    description="Chat with your PDFs or use web search to answer questions.",
     theme=gr.themes.Soft(
         primary_hue="orange",
         secondary_hue="amber",
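Since gr.ChatInterface passes additional_inputs to the callback positionally, after the (message, history) pair it supplies itself, the component order above must match the tail of the new respond signature. A quick reference (the mapping is implied by the diff, not stated in it):

# How Gradio feeds the components into respond(), in order:
#   gr.Dropdown      -> model            (one of MODELS)
#   gr.Slider        -> temperature      (0.1 to 1.0)
#   gr.Slider        -> num_calls        (1 to 5)
#   gr.CheckboxGroup -> selected_docs    (list of checked choice strings)
#   gr.Checkbox      -> use_web_search   (bool)
def respond(message, history, model, temperature, num_calls, selected_docs, use_web_search):
    ...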
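Because the new gr.CheckboxGroup replaces the previous document_selector and is created with choices=[], the document list starts empty; an upload handler elsewhere in app.py has to repopulate it. A hypothetical sketch of the usual Gradio pattern, with the handler name and wiring assumed rather than taken from this diff:

# Hypothetical companion handler, not part of this commit: returning gr.update(...)
# from a PDF-upload event replaces the CheckboxGroup's choices in place.
def update_document_choices(uploaded_names):
    return gr.update(choices=uploaded_names, value=uploaded_names)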