Update app.py
Improved the handle_query function to give a creative response for already cached data.
app.py
CHANGED
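The revised flow below leans on two cache helpers defined earlier in app.py: get_cached_answer(query) and store_in_cache(query, answer) (the first hunk header shows the latter's signature). Their bodies are not part of this commit; purely as a reading aid, a minimal sketch assuming a plain in-memory dict keyed by the normalized query might look like this. The real helpers may use TTLs, semantic keys, or persistent storage instead.

# Hypothetical sketch only; not taken from app.py.
_cache: dict[str, str] = {}

def store_in_cache(query: str, answer: str) -> None:
    # Remember the latest answer so a repeat query can be blended later.
    _cache[query.strip().lower()] = answer

def get_cached_answer(query: str) -> str | None:
    # Return the previously stored answer, or None on a cache miss.
    return _cache.get(query.strip().lower())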
@@ -360,7 +360,7 @@ def store_in_cache(query: str, answer: str):
 ###############################################################################
 def handle_query(query: str, detail: bool = False) -> str:
     """
-    Main function to process the query.
+    Main function to process the query.
     :param query: The user's question.
     :param detail: Whether the user wants a more detailed response.
     :return: Response string from Daily Wellness AI.
@@ -369,55 +369,87 @@ def handle_query(query: str, detail: bool = False) -> str:
         return "Please provide a valid question."

     try:
-        # 1) Check the cache
-        cached_answer = get_cached_answer(query)
-        if cached_answer:
-            return cached_answer
-
-        # 2) Sanity Check: Determine if the question is relevant to daily wellness
+        # 1) Sanity Check: Determine if the question is relevant to daily wellness
         is_relevant = sanity_checker.is_relevant(query)
         if not is_relevant:
             return "Your question seems out of context or not related to daily wellness. Please ask a wellness-related question."

-        #
+        # 2) Proceed with retrieval from the knowledge base
         retrieved = retriever.retrieve(query)
+
+        # 3) Check the cache
+        cached_answer = get_cached_answer(query)
+
+        # 4) If no retrieved data from the knowledge base
         if not retrieved:
+            # If we do have a cached answer, return it
+            if cached_answer:
+                logger.info("No relevant entries found in knowledge base. Returning cached answer.")
+                return cached_answer
+            # Otherwise, no KB results and no cache => no answer
             return "I'm sorry, I couldn't find an answer to your question."

-        #
+        # 5) We have retrieved data; let's check for similarity threshold
         top_score = retrieved[0][1]  # Assuming the list is sorted descending
         similarity_threshold = 0.3  # Adjust this threshold based on empirical results

         if top_score < similarity_threshold:
-            # Perform web search using manager_agent
+            # (Low similarity) Perform web search using manager_agent
             logger.info("Similarity score below threshold. Performing web search.")
             web_search_response = manager_agent.run(query)
             logger.debug(f"Web search response: {web_search_response}")

-            #
+            # Combine any cached answer (if it exists) with the web result
+            if cached_answer:
+                blend_prompt = (
+                    f"You previously provided the following answer:\n\n"
+                    f"{cached_answer}\n\n"
+                    f"Now, we also have these web results:\n\n"
+                    f"{web_search_response}\n\n"
+                    "Please combine them into a more creative and accurate response. "
+                    "Add positivity and end with a short inspirational note."
+                )
+                final_answer = llm._call(blend_prompt).strip()
+            else:
+                # If no cache, just return the web response
+                final_answer = (
+                    f"**Daily Wellness AI**\n\n"
+                    f"{web_search_response}\n\n"
+                    "Disclaimer: This information is retrieved from the web and is not a substitute for professional medical advice.\n\n"
+                    "Wishing you a calm and wonderful day!"
+                )
+
+            # Store in cache
+            store_in_cache(query, final_answer)
+            return final_answer
+
+        # 6) If similarity is sufficient, we will finalize an answer from the knowledge base
+        responses = [ans[0] for ans in retrieved]
+
+        # 6a) If we have a cached answer, let's blend it with the new knowledge base data
+        if cached_answer:
+            blend_prompt = (
+                f"You provided this answer previously:\n\n"
+                f"{cached_answer}\n\n"
+                f"Now, here are some newly retrieved answers:\n\n"
+                f"{chr(10).join(f'- {r}' for r in responses)}\n\n"
+                "Please synthesize these together, adding new insights, creativity, and a short inspirational note at the end."
             )
+            final_answer = llm._call(blend_prompt).strip()
+        else:
+            # 6b) No cache => proceed with normal expansions
+            final_answer = answer_expander.expand(query, responses, detail=detail)

-        #
+        # 7) Store new or blended answer in cache
+        store_in_cache(query, final_answer)
+        return final_answer

-        # 6) Store in cache (ADDED)
-        store_in_cache(query, expanded_answer)
-        return expanded_answer
     except Exception as e:
         logger.error(f"Error handling query: {e}")
         logger.debug("Exception details:", exc_info=True)
         return "An error occurred while processing your request."

+
 ###############################################################################
 # 11) Gradio Interface
 ###############################################################################