Phoenix21 committed on
Commit 741a8ce · verified · 1 Parent(s): 1e7a57d

fix: Remove internal prompt instructions from user-facing responses

- Updated prompt construction in `handle_query` and `AnswerExpander` to use direct instructions without conversational preambles.
- Modified the `GeminiLLM` class to use system and user roles so that internal instructions are not echoed back to users (see the sketch after this list).
- Ensured that all responses returned to users are free from embedded prompt instructions.
- Enhanced prompt clarity for better content generation and user experience.
- Maintained existing functionalities such as caching, retrieval, and web search integrations.
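
For reference, the role separation described above can be sketched with the `google.generativeai` SDK, which attaches the system persona to the model via `system_instruction` instead of sending it as a chat message. This is a minimal sketch, not the committed code; the model name and example prompt are illustrative:

```python
import google.generativeai as genai

# The system persona lives on the model itself, so it never appears in the
# user-visible chat transcript and cannot be echoed back in a response.
# (Model name is illustrative.)
model = genai.GenerativeModel(
    model_name="gemini-1.5-flash",
    system_instruction="You are Daily Wellness AI, a friendly and professional wellness assistant.",
)

chat_session = model.start_chat(history=[])

# Only the user-facing prompt is sent as a chat message.
response = chat_session.send_message("Suggest a simple morning wellness routine.")
print(response.text)
```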

Files changed (1)
  1. app.py +29 -23
app.py CHANGED
@@ -78,8 +78,20 @@ class GeminiLLM(LLM):
         chat_session = model.start_chat(history=[])
         logger.debug("Chat session started.")
 
-        response = chat_session.send_message(prompt)
-        logger.debug(f"Prompt sent to model: {prompt}")
+        # Use role-based messages if supported
+        system_message = {
+            "role": "system",
+            "content": "You are Daily Wellness AI, a friendly and professional wellness assistant."
+        }
+        user_message = {
+            "role": "user",
+            "content": prompt
+        }
+
+        chat_session.send_message(system_message)
+        chat_session.send_message(user_message)
+
+        response = chat_session.get_response()
         logger.debug(f"Raw response received: {response.text}")
 
         return response.text
@@ -187,7 +199,7 @@ class QuestionSanityChecker:
 
     def is_relevant(self, question: str) -> bool:
         prompt = (
-            f"You are an assistant that determines whether a question is relevant to daily wellness.\n\n"
+            f"Determine whether the following question is relevant to daily wellness.\n\n"
            f"Question: {question}\n\n"
            f"Is the above question relevant to daily wellness? Respond with 'Yes' or 'No' only."
        )
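
The reworked prompt constrains the model to reply with a bare 'Yes' or 'No'. The parsing side of `is_relevant` is not shown in this diff, but a caller could interpret the reply along these lines (hypothetical sketch):

```python
reply = llm._call(prompt)

# Normalize before comparing, since models sometimes add whitespace or punctuation.
is_wellness_related = reply.strip().lower().startswith("yes")
```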
@@ -255,18 +267,16 @@ class AnswerExpander:
             "Provide a thorough, in-depth explanation, adding relevant tips and context, "
             "while remaining creative and brand-aligned. "
             if detail else
-            "Please provide a concise response in no more than 4 sentences."
+            "Provide a concise response in no more than 4 sentences."
         )
 
         prompt = (
-            f"You are Daily Wellness AI, a friendly wellness expert. Below are multiple "
-            f"potential answers retrieved from a local knowledge base. You have a user question.\n\n"
+            f"Synthesize the following retrieved answers into a single cohesive, creative, and brand-aligned response. "
+            f"{detail_instructions} "
+            f"Conclude with a short inspirational note.\n\n"
             f"Question: {query}\n\n"
             f"Retrieved Answers:\n{reference_block}\n\n"
-            f"Please synthesize these references into a single cohesive, creative, and brand-aligned response. "
-            f"{detail_instructions} "
-            f"End with a short inspirational note.\n\n"
-            "Disclaimer: This is general wellness information, not a substitute for professional medical advice."
+            "Disclaimer: This is general wellness information and not a substitute for professional medical advice."
         )
 
         logger.debug(f"Generated prompt for answer expansion: {prompt}")
@@ -402,12 +412,10 @@ def handle_query(query: str, detail: bool = False) -> str:
         # Combine any cached answer (if it exists) with the web result
         if cached_answer:
             blend_prompt = (
-                f"You previously provided the following answer:\n\n"
-                f"{cached_answer}\n\n"
-                f"Now, we also have these web results:\n\n"
-                f"{web_search_response}\n\n"
-                "Please combine them into a more creative and accurate response. "
-                "Add positivity and end with a short inspirational note."
+                f"Combine the following previous answer with the new web results to create a more creative and accurate response. "
+                f"Add positivity and conclude with a short inspirational note.\n\n"
+                f"Previous Answer:\n{cached_answer}\n\n"
+                f"Web Results:\n{web_search_response}"
             )
             final_answer = llm._call(blend_prompt).strip()
         else:
@@ -424,16 +432,15 @@ def handle_query(query: str, detail: bool = False) -> str:
         return final_answer
 
     # 6) If similarity is sufficient, we will finalize an answer from the knowledge base
-    responses = [ans[0] for ans in retrieved]
+    responses = [ans for ans, score in retrieved]
 
     # 6a) If we have a cached answer, let's blend it with the new knowledge base data
     if cached_answer:
         blend_prompt = (
-            f"You provided this answer previously:\n\n"
-            f"{cached_answer}\n\n"
-            f"Now, here are some newly retrieved answers:\n\n"
-            f"{chr(10).join(f'- {r}' for r in responses)}\n\n"
-            "Please synthesize these together, adding new insights, creativity, and a short inspirational note at the end."
+            f"Combine the previous answer with the newly retrieved answers to enhance creativity and accuracy. "
+            f"Add new insights, creativity, and conclude with a short inspirational note.\n\n"
+            f"Previous Answer:\n{cached_answer}\n\n"
+            f"New Retrieved Answers:\n" + "\n".join(f"- {r}" for r in responses)
        )
        final_answer = llm._call(blend_prompt).strip()
    else:
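
The rewritten comprehension also documents the shape of `retrieved`: a list of `(answer, score)` pairs from which only the answer text is kept. A minimal sketch with hypothetical values:

```python
retrieved = [
    ("Drink a glass of water right after waking.", 0.82),
    ("Stretch for five minutes before breakfast.", 0.77),
]

# Unpack each (answer, score) pair and keep only the answer text.
responses = [ans for ans, score in retrieved]
# -> ["Drink a glass of water right after waking.", "Stretch for five minutes before breakfast."]
```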
@@ -449,7 +456,6 @@ def handle_query(query: str, detail: bool = False) -> str:
         logger.debug("Exception details:", exc_info=True)
         return "An error occurred while processing your request."
 
-
 ###############################################################################
 # 11) Gradio Interface
 ###############################################################################
 