fix: Remove internal prompt instructions from user-facing responses
- Updated prompt constructions in `handle_query` and `AnswerExpander` to use direct instructions without conversational preambles.
- Modified `GeminiLLM` class to use system and user roles so that internal instructions are not echoed back to users (a usage sketch follows this summary).
- Ensured that all responses returned to users are free from embedded prompt instructions.
- Enhanced prompt clarity for better content generation and user experience.
- Maintained existing functionalities such as caching, retrieval, and web search integrations.
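The role-based change to `GeminiLLM` can be pictured with a short sketch. This is a minimal illustration only, assuming the `google-generativeai` SDK that the class appears to wrap; the `ask_wellness_ai` helper and the model name are hypothetical, and the actual wiring is the `_call` change shown in the diff below.

```python
# Minimal sketch: keep the system instruction out of the user-visible exchange.
# Assumes the google-generativeai SDK; the helper name and model name are illustrative.
import google.generativeai as genai

def ask_wellness_ai(prompt: str, api_key: str) -> str:
    genai.configure(api_key=api_key)
    model = genai.GenerativeModel(
        model_name="gemini-1.5-flash",  # illustrative model name
        # The persona lives in the system instruction, so it never appears in the reply text.
        system_instruction="You are Daily Wellness AI, a friendly and professional wellness assistant.",
    )
    chat_session = model.start_chat(history=[])
    # Only the user's question is sent as a message; no internal instructions are embedded.
    response = chat_session.send_message(prompt)
    return response.text
```

Carrying the persona in a system-level instruction (or, as in the committed code, a dedicated system message) is what keeps the prompt scaffolding out of the text returned to users.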
app.py
CHANGED
@@ -78,8 +78,20 @@ class GeminiLLM(LLM):
         chat_session = model.start_chat(history=[])
         logger.debug("Chat session started.")
 
-
-
+        # Use role-based messages if supported
+        system_message = {
+            "role": "system",
+            "content": "You are Daily Wellness AI, a friendly and professional wellness assistant."
+        }
+        user_message = {
+            "role": "user",
+            "content": prompt
+        }
+
+        chat_session.send_message(system_message)
+        chat_session.send_message(user_message)
+
+        response = chat_session.get_response()
         logger.debug(f"Raw response received: {response.text}")
 
         return response.text
@@ -187,7 +199,7 @@ class QuestionSanityChecker:
 
     def is_relevant(self, question: str) -> bool:
         prompt = (
-            f"
+            f"Determine whether the following question is relevant to daily wellness.\n\n"
             f"Question: {question}\n\n"
             f"Is the above question relevant to daily wellness? Respond with 'Yes' or 'No' only."
         )
@@ -255,18 +267,16 @@ class AnswerExpander:
             "Provide a thorough, in-depth explanation, adding relevant tips and context, "
             "while remaining creative and brand-aligned. "
             if detail else
-            "
+            "Provide a concise response in no more than 4 sentences."
         )
 
         prompt = (
-            f"
-            f"
+            f"Synthesize the following retrieved answers into a single cohesive, creative, and brand-aligned response. "
+            f"{detail_instructions} "
+            f"Conclude with a short inspirational note.\n\n"
             f"Question: {query}\n\n"
             f"Retrieved Answers:\n{reference_block}\n\n"
-
-            f"{detail_instructions} "
-            f"End with a short inspirational note.\n\n"
-            "Disclaimer: This is general wellness information, not a substitute for professional medical advice."
+            "Disclaimer: This is general wellness information and not a substitute for professional medical advice."
         )
 
         logger.debug(f"Generated prompt for answer expansion: {prompt}")
@@ -402,12 +412,10 @@ def handle_query(query: str, detail: bool = False) -> str:
             # Combine any cached answer (if it exists) with the web result
             if cached_answer:
                 blend_prompt = (
-                    f"
-                    f"
-                    f"
-                    f"{web_search_response}
-                    "Please combine them into a more creative and accurate response. "
-                    "Add positivity and end with a short inspirational note."
+                    f"Combine the following previous answer with the new web results to create a more creative and accurate response. "
+                    f"Add positivity and conclude with a short inspirational note.\n\n"
+                    f"Previous Answer:\n{cached_answer}\n\n"
+                    f"Web Results:\n{web_search_response}"
                 )
                 final_answer = llm._call(blend_prompt).strip()
             else:
@@ -424,16 +432,15 @@ def handle_query(query: str, detail: bool = False) -> str:
             return final_answer
 
         # 6) If similarity is sufficient, we will finalize an answer from the knowledge base
-        responses = [ans
+        responses = [ans for ans, score in retrieved]
 
         # 6a) If we have a cached answer, let's blend it with the new knowledge base data
         if cached_answer:
             blend_prompt = (
-                f"
-                f"
-                f"
-                f"
-                "Please synthesize these together, adding new insights, creativity, and a short inspirational note at the end."
+                f"Combine the previous answer with the newly retrieved answers to enhance creativity and accuracy. "
+                f"Add new insights, creativity, and conclude with a short inspirational note.\n\n"
+                f"Previous Answer:\n{cached_answer}\n\n"
+                f"New Retrieved Answers:\n" + "\n".join(f"- {r}" for r in responses)
             )
             final_answer = llm._call(blend_prompt).strip()
         else:
@@ -449,7 +456,6 @@ def handle_query(query: str, detail: bool = False) -> str:
         logger.debug("Exception details:", exc_info=True)
         return "An error occurred while processing your request."
 
-
 ###############################################################################
 # 11) Gradio Interface
 ###############################################################################
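For completeness, the Gradio interface referenced in section 11 is what surfaces these cleaned-up answers to users. The sketch below is only an assumption about that wiring: the component choices, labels, and title are illustrative, and the only piece taken from the code above is `handle_query(query, detail)`.

```python
# Minimal sketch: exposing handle_query through Gradio (section 11 of app.py).
# Component choices and labels are assumptions; handle_query is defined earlier in app.py.
import gradio as gr

demo = gr.Interface(
    fn=handle_query,
    inputs=[
        gr.Textbox(label="Ask a wellness question"),
        gr.Checkbox(label="Detailed answer", value=False),
    ],
    outputs=gr.Markdown(),
    title="Daily Wellness AI",
)

if __name__ == "__main__":
    demo.launch()
```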