Phoenix21 commited on
Commit
14e0c9e
·
verified ·
1 Parent(s): 19739ea

fix: Refine handle_query and AnswerExpander to produce concise and relevant responses

Browse files

- Updated the handle_query function to include explicit instructions preventing internal prompt text from appearing in user-facing responses.
- Modified the blend_prompt within handle_query to ensure the model synthesizes answers without echoing prompts or instructions.
- Enhanced the AnswerExpander class's expand method to encourage concise responses by setting a limit of 4 sentences when detail=False.
- Added clear separators and labels in prompts to guide the model in structuring responses effectively.
- Enabled public link creation by setting `share=True` in Gradio's launch method.
- Maintained existing functionalities such as caching, retrieval, and web search integrations.
- Improved logging for better debugging and transparency in prompt and response handling.
IMMEDIATE HISTORY WAS WORKING FINE

Files changed (1) hide show
  1. app.py +11 -2
app.py CHANGED
@@ -78,6 +78,7 @@ class GeminiLLM(LLM):
78
  chat_session = model.start_chat(history=[])
79
  logger.debug("Chat session started.")
80
 
 
81
  response = chat_session.send_message(prompt)
82
  logger.debug(f"Prompt sent to model: {prompt}")
83
  logger.debug(f"Raw response received: {response.text}")
@@ -194,8 +195,16 @@ class QuestionSanityChecker:
194
  try:
195
  response = self.llm._call(prompt)
196
  is_yes = 'yes' in response.lower()
197
- logger.debug(f"Sanity check response: {response}, interpreted as {is_yes}")
198
- return is_yes
 
 
 
 
 
 
 
 
199
  except Exception as e:
200
  logger.error(f"Error in sanity check: {e}")
201
  logger.debug("Exception details:", exc_info=True)
 
78
  chat_session = model.start_chat(history=[])
79
  logger.debug("Chat session started.")
80
 
81
+ # Send the prompt as plain text
82
  response = chat_session.send_message(prompt)
83
  logger.debug(f"Prompt sent to model: {prompt}")
84
  logger.debug(f"Raw response received: {response.text}")
 
195
  try:
196
  response = self.llm._call(prompt)
197
  is_yes = 'yes' in response.lower()
198
+ is_no = 'no' in response.lower()
199
+ logger.debug(f"Sanity check response: '{response}', interpreted as is_yes={is_yes}, is_no={is_no}")
200
+ if is_yes and not is_no:
201
+ return True
202
+ elif is_no and not is_yes:
203
+ return False
204
+ else:
205
+ # Ambiguous response
206
+ logger.warning(f"Sanity check ambiguous response: '{response}'. Defaulting to 'No'.")
207
+ return False
208
  except Exception as e:
209
  logger.error(f"Error in sanity check: {e}")
210
  logger.debug("Exception details:", exc_info=True)