Pijush2023 committed on
Commit
7aa1475
·
verified ·
1 Parent(s): 25328c1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -3
app.py CHANGED
@@ -597,7 +597,7 @@ import traceback
597
  def generate_answer(message, choice, retrieval_mode, selected_model):
598
  logging.debug(f"generate_answer called with choice: {choice}, retrieval_mode: {retrieval_mode}, and selected_model: {selected_model}")
599
 
600
- # Logic for disabling options for Phi-3.5
601
  if selected_model == phi_pipe:
602
  choice = None
603
  retrieval_mode = None
@@ -611,10 +611,10 @@ def generate_answer(message, choice, retrieval_mode, selected_model):
611
  else:
612
  prompt_template = QA_CHAIN_PROMPT_1 # Fallback to template1
613
 
 
614
  if retrieval_mode == "VDB":
615
  logging.debug("Using VDB retrieval mode")
616
- # Adjust this block to handle both LM-1 and LM-3
617
- if selected_model in [gpt_model, gpt_mini_model]: # Both LM-1 and LM-3 should use the same logic
618
  logging.debug(f"Selected model: {'LM-1' if selected_model == gpt_model else 'LM-3'}")
619
  retriever = gpt_retriever
620
  context = retriever.get_relevant_documents(message)
@@ -659,11 +659,13 @@ def generate_answer(message, choice, retrieval_mode, selected_model):
659
  logging.error("LM-2 did not return any response.")
660
  return "No response generated.", []
661
 
 
662
  elif retrieval_mode == "KGF":
663
  logging.debug("Using KGF retrieval mode")
664
  response = chain_neo4j.invoke({"question": message})
665
  logging.debug(f"KGF response: {response}")
666
  return response, extract_addresses(response)
 
667
  else:
668
  logging.error("Invalid retrieval mode selected.")
669
  return "Invalid retrieval mode selected.", []
 
597
  def generate_answer(message, choice, retrieval_mode, selected_model):
598
  logging.debug(f"generate_answer called with choice: {choice}, retrieval_mode: {retrieval_mode}, and selected_model: {selected_model}")
599
 
600
+ # Logic for disabling options for Phi-3.5 (LM-2)
601
  if selected_model == phi_pipe:
602
  choice = None
603
  retrieval_mode = None
 
611
  else:
612
  prompt_template = QA_CHAIN_PROMPT_1 # Fallback to template1
613
 
614
+ # VDB retrieval mode
615
  if retrieval_mode == "VDB":
616
  logging.debug("Using VDB retrieval mode")
617
+ if selected_model in [gpt_model, gpt_mini_model]: # Handle both LM-1 and LM-3
 
618
  logging.debug(f"Selected model: {'LM-1' if selected_model == gpt_model else 'LM-3'}")
619
  retriever = gpt_retriever
620
  context = retriever.get_relevant_documents(message)
 
659
  logging.error("LM-2 did not return any response.")
660
  return "No response generated.", []
661
 
662
+ # KGF retrieval mode
663
  elif retrieval_mode == "KGF":
664
  logging.debug("Using KGF retrieval mode")
665
  response = chain_neo4j.invoke({"question": message})
666
  logging.debug(f"KGF response: {response}")
667
  return response, extract_addresses(response)
668
+
669
  else:
670
  logging.error("Invalid retrieval mode selected.")
671
  return "Invalid retrieval mode selected.", []