Pijush2023 committed
Commit 6df654f · verified · 1 Parent(s): f8bd341

Update app.py

Files changed (1)
  1. app.py +35 -188

app.py CHANGED
@@ -593,186 +593,7 @@ Detailed Answer:
 
 import traceback
 
-# def generate_answer(message, choice, retrieval_mode, selected_model):
-#     logging.debug(f"generate_answer called with choice: {choice}, retrieval_mode: {retrieval_mode}, and selected_model: {selected_model}")
-
-#     # Logic for disabling options for Phi-3.5
-#     if selected_model == "LM-2":
-#         choice = None
-#         retrieval_mode = None
-
-#     try:
-#         # Select the appropriate template based on the choice and model
-#         if choice == "Details" and selected_model == chat_model1: # GPT-4o-mini
-#             prompt_template = PromptTemplate(input_variables=["context", "question"], template=gpt4o_mini_template_details)
-#         elif choice == "Details":
-#             prompt_template = QA_CHAIN_PROMPT_1
-#         elif choice == "Conversational":
-#             prompt_template = QA_CHAIN_PROMPT_2
-#         else:
-#             prompt_template = QA_CHAIN_PROMPT_1 # Fallback to template1
-
-#         # # Handle hotel-related queries
-#         # if "hotel" in message.lower() or "hotels" in message.lower() and "birmingham" in message.lower():
-#         #     logging.debug("Handling hotel-related query")
-#         #     response = fetch_google_hotels()
-#         #     logging.debug(f"Hotel response: {response}")
-#         #     return response, extract_addresses(response)
-
-#         # # Handle restaurant-related queries
-#         # if "restaurant" in message.lower() or "restaurants" in message.lower() and "birmingham" in message.lower():
-#         #     logging.debug("Handling restaurant-related query")
-#         #     response = fetch_yelp_restaurants()
-#         #     logging.debug(f"Restaurant response: {response}")
-#         #     return response, extract_addresses(response)
-
-#         # # Handle flight-related queries
-#         # if "flight" in message.lower() or "flights" in message.lower() and "birmingham" in message.lower():
-#         #     logging.debug("Handling flight-related query")
-#         #     response = fetch_google_flights()
-#         #     logging.debug(f"Flight response: {response}")
-#         #     return response, extract_addresses(response)
-
-#         # Retrieval-based response
-#         if retrieval_mode == "VDB":
-#             logging.debug("Using VDB retrieval mode")
-#             if selected_model == chat_model:
-#                 logging.debug("Selected model: LM-1")
-#                 retriever = gpt_retriever
-#                 context = retriever.get_relevant_documents(message)
-#                 logging.debug(f"Retrieved context: {context}")
-
-#                 prompt = prompt_template.format(context=context, question=message)
-#                 logging.debug(f"Generated prompt: {prompt}")
-
-#                 qa_chain = RetrievalQA.from_chain_type(
-#                     llm=chat_model,
-#                     chain_type="stuff",
-#                     retriever=retriever,
-#                     chain_type_kwargs={"prompt": prompt_template}
-#                 )
-#                 response = qa_chain({"query": message})
-#                 logging.debug(f"LM-1 response: {response}")
-#                 return response['result'], extract_addresses(response['result'])
-
-#             elif selected_model == chat_model1:
-#                 logging.debug("Selected model: LM-3")
-#                 retriever = gpt_retriever
-#                 context = retriever.get_relevant_documents(message)
-#                 logging.debug(f"Retrieved context: {context}")
-
-#                 prompt = prompt_template.format(context=context, question=message)
-#                 logging.debug(f"Generated prompt: {prompt}")
-
-#                 qa_chain = RetrievalQA.from_chain_type(
-#                     llm=chat_model1,
-#                     chain_type="stuff",
-#                     retriever=retriever,
-#                     chain_type_kwargs={"prompt": prompt_template}
-#                 )
-#                 response = qa_chain({"query": message})
-#                 logging.debug(f"LM-3 response: {response}")
-#                 return response['result'], extract_addresses(response['result'])
-#             #-----------------------------------------------------------------------------------------------------------------
-
-#             # Modify the Phi-3.5 prompt to include the selected file
-#             elif selected_model == phi_pipe:
-#                 retriever = phi_retriever
-#                 context_documents = retriever.get_relevant_documents(message)
-#                 context = "\n".join([doc.page_content for doc in context_documents])
-
-#                 prompt = phi_custom_template.format(context=context, question=message, document_name=selected_file)
-#                 response = selected_model(prompt, **{
-#                     "max_new_tokens": 250,
-#                     "return_full_text": True,
-#                     "temperature": 0.0,
-#                     "do_sample": False,
-#                 })
-
-#                 if response:
-#                     generated_text = response[0]['generated_text']
-#                     cleaned_response = clean_response(generated_text)
-#                     return cleaned_response
-#                 else:
-#                     return "No response generated.", []
-
-
-
-#             #------------------------------------------------------------------------------------------------------------
-#             # elif selected_model == phi_pipe:
-#             #     logging.debug("Selected model: LM-2")
-#             #     retriever = phi_retriever
-#             #     context_documents = retriever.get_relevant_documents(message)
-#             #     context = "\n".join([doc.page_content for doc in context_documents])
-#             #     logging.debug(f"Retrieved context for LM-2: {context}")
-
-#             #     # Use the correct template variable
-#             #     prompt = phi_custom_template.format(context=context, question=message)
-#             #     logging.debug(f"Generated LM-2 prompt: {prompt}")
-
-#             #     response = selected_model(prompt, **{
-#             #         "max_new_tokens": 250,
-#             #         "return_full_text": True,
-#             #         "temperature": 0.0,
-#             #         "do_sample": False,
-#             #     })
-
-#             #     if response:
-#             #         generated_text = response[0]['generated_text']
-#             #         logging.debug(f"LM-2 Response: {generated_text}")
-#             #         cleaned_response = clean_response(generated_text)
-#             #         return cleaned_response, extract_addresses(cleaned_response)
-#             #     else:
-#             #         logging.error("LM-2 did not return any response.")
-#             #         return "No response generated.", []
-
-#         elif retrieval_mode == "KGF":
-#             logging.debug("Using KGF retrieval mode")
-#             response = chain_neo4j.invoke({"question": message})
-#             logging.debug(f"KGF response: {response}")
-#             return response, extract_addresses(response)
-#         else:
-#             logging.error("Invalid retrieval mode selected.")
-#             return "Invalid retrieval mode selected.", []
-
-#     except Exception as e:
-#         logging.error(f"Error in generate_answer: {str(e)}")
-#         logging.error(traceback.format_exc())
-#         return "Sorry, I encountered an error while processing your request.", []
-
-# # def generate_answer(message, choice, retrieval_mode, selected_model, selected_file):
-# #     # Ensure a file is selected
-# #     if not selected_file:
-# #         return "Please choose a file to proceed."
-
-# #     # Modify the Phi-3.5 prompt to include the selected file
-# #     if selected_model == phi_pipe:
-# #         retriever = phi_retriever
-# #         context_documents = retriever.get_relevant_documents(message)
-# #         context = "\n".join([doc.page_content for doc in context_documents])
-
-# #         prompt = phi_custom_template.format(context=context, question=message, document_name=selected_file)
-# #         response = selected_model(prompt, **{
-# #             "max_new_tokens": 250,
-# #             "return_full_text": True,
-# #             "temperature": 0.0,
-# #             "do_sample": False,
-# #         })
-
-# #         if response:
-# #             generated_text = response[0]['generated_text']
-# #             cleaned_response = clean_response(generated_text)
-# #             return cleaned_response
-# #         else:
-# #             return "No response generated.", []
-
-
-
-def generate_answer(message, choice, retrieval_mode, selected_model, selected_file):
-    # Ensure a file is selected
-    if not selected_file:
-        return "Please choose a file to proceed."
-
+def generate_answer(message, choice, retrieval_mode, selected_model):
     logging.debug(f"generate_answer called with choice: {choice}, retrieval_mode: {retrieval_mode}, and selected_model: {selected_model}")
 
     # Logic for disabling options for Phi-3.5
@@ -791,6 +612,27 @@ def generate_answer(message, choice, retrieval_mode, selected_model, selected_fi
     else:
         prompt_template = QA_CHAIN_PROMPT_1 # Fallback to template1
 
+    # # Handle hotel-related queries
+    # if "hotel" in message.lower() or "hotels" in message.lower() and "birmingham" in message.lower():
+    #     logging.debug("Handling hotel-related query")
+    #     response = fetch_google_hotels()
+    #     logging.debug(f"Hotel response: {response}")
+    #     return response, extract_addresses(response)
+
+    # # Handle restaurant-related queries
+    # if "restaurant" in message.lower() or "restaurants" in message.lower() and "birmingham" in message.lower():
+    #     logging.debug("Handling restaurant-related query")
+    #     response = fetch_yelp_restaurants()
+    #     logging.debug(f"Restaurant response: {response}")
+    #     return response, extract_addresses(response)
+
+    # # Handle flight-related queries
+    # if "flight" in message.lower() or "flights" in message.lower() and "birmingham" in message.lower():
+    #     logging.debug("Handling flight-related query")
+    #     response = fetch_google_flights()
+    #     logging.debug(f"Flight response: {response}")
+    #     return response, extract_addresses(response)
+
     # Retrieval-based response
     if retrieval_mode == "VDB":
         logging.debug("Using VDB retrieval mode")
@@ -833,23 +675,30 @@ def generate_answer(message, choice, retrieval_mode, selected_model, selected_fi
             return response['result'], extract_addresses(response['result'])
 
         elif selected_model == phi_pipe:
+            logging.debug("Selected model: LM-2")
             retriever = phi_retriever
             context_documents = retriever.get_relevant_documents(message)
             context = "\n".join([doc.page_content for doc in context_documents])
-
-            prompt = phi_custom_template.format(context=context, question=message, document_name=selected_file)
+            logging.debug(f"Retrieved context for LM-2: {context}")
+
+            # Use the correct template variable
+            prompt = phi_custom_template.format(context=context, question=message)
+            logging.debug(f"Generated LM-2 prompt: {prompt}")
+
             response = selected_model(prompt, **{
                 "max_new_tokens": 250,
                 "return_full_text": True,
                 "temperature": 0.0,
                 "do_sample": False,
            })
-
+
             if response:
                 generated_text = response[0]['generated_text']
+                logging.debug(f"LM-2 Response: {generated_text}")
                 cleaned_response = clean_response(generated_text)
-                return cleaned_response
+                return cleaned_response, extract_addresses(cleaned_response)
             else:
+                logging.error("LM-2 did not return any response.")
                 return "No response generated.", []
 
         elif retrieval_mode == "KGF":
@@ -869,8 +718,6 @@ def generate_answer(message, choice, retrieval_mode, selected_model, selected_fi
 
 
 
-
-
 def add_message(history, message):
     history.append((message, None))
     return history, gr.Textbox(value="", interactive=True, show_label=False)
@@ -1635,7 +1482,7 @@ files_list = [
 ]
 
 
-file_dropdown = gr.Dropdown(label="Select Document", choices=files_list, value=None)
+
 
 
 with gr.Blocks(theme='gradio/soft') as demo:
@@ -1646,7 +1493,7 @@ with gr.Blocks(theme='gradio/soft') as demo:
     state = gr.State()
 
     # File dropdown
-    file_dropdown = gr.Dropdown(label="Select Document", choices=files_list, value=None)
+    # file_dropdown = gr.Dropdown(label="Select Document", choices=files_list, value=None)
 
     chatbot = gr.Chatbot([], elem_id="RADAR:Channel 94.1", bubble_full_width=False)
    choice = gr.Radio(label="Select Style", choices=["Details", "Conversational"], value="Conversational",interactive=False,visible=False)
 
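A minimal sketch of how the updated four-argument generate_answer signature would be called after this commit drops the selected_file parameter and the file dropdown; the query string is hypothetical, and chat_model is assumed to be the LM-1 client configured elsewhere in app.py:

# Hypothetical call from the Gradio handler (argument values assumed for illustration)
response, addresses = generate_answer(
    message="Find events in Birmingham this weekend",  # example user query, not from the commit
    choice="Conversational",                            # "Details" or "Conversational"
    retrieval_mode="VDB",                               # "VDB" or "KGF"
    selected_model=chat_model,                          # assumed: the LM-1 chat client defined in app.py
)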