Update app.py

app.py (CHANGED)
@@ -417,7 +417,7 @@ Example final answer:
 
 If you can answer the query directly without tools (e.g., a simple greeting, acknowledging instructions), use the 'answer' tool immediately with a direct, polite response.
 
-Think step-by-step. Decide if tools are needed based on the **Tool Usage Priority**. If so, which ones? What parameters? Consider if a broad query requires setting a lower `threshold` and higher `max_matches` for `lookup_business_info`. If you have results, how do they help answer the user? Synthesize ALL relevant information into
+Think step-by-step. Decide if tools are needed based on the **Tool Usage Priority**. If so, which ones? What parameters? Consider if a broad query requires setting a lower `threshold` and higher `max_matches` for `lookup_business_info`. If you have results, how do they help answer the user? Synthesize ALL relevant information into a comprehensive answer. If results are insufficient or indicate an error, how should you respond gracefully? Finally, formulate the comprehensive answer using the 'answer' tool.
 
 Output ONLY tool calls within <tool_code> tags or a final answer using the 'answer' tool. Do not include any other text unless it's within the 'answer' tool's parameters.
 </system>
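
The prompt above only defines the output contract: the model replies either with tool calls wrapped in <tool_code> tags or with a single 'answer' tool call. As a rough illustration (not code from this Space), the calling side could recover the tool calls with a regex like the following; the helper name and the DOTALL flag are assumptions, app.py's actual parser is not shown in this hunk:

import re

def extract_tool_calls(model_output: str) -> list[str]:
    # Collect the body of every <tool_code>...</tool_code> block emitted by the model.
    # Hypothetical helper for illustration only.
    return [m.strip() for m in re.findall(r"<tool_code>(.*?)</tool_code>", model_output, re.DOTALL)]
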
@@ -446,8 +446,9 @@ def chat_with_bot(user_input, chat_history_state, system_message, max_new_tokens
     # Append user message to history immediately for display
     # The bot message will be updated iteratively
     # We append a placeholder now, and update it with the final response later.
-
-
+    # Ensure the initial yield is a list of lists for Gradio, even if it's just one turn
+    initial_history = chat_history_state + [[user_input, "..."]]
+    yield initial_history # Yield state with placeholder
 
     original_user_input = user_input
     print(f"\n--- Starting turn with input: {user_input} ---") # Debug Print
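
The three added lines implement the usual Gradio streaming pattern: append a [user, "..."] placeholder turn and yield it right away so the user message renders before generation finishes. A minimal sketch of that generator shape, with a stand-in for the real tool/LLM loop:

def chat_fn_sketch(user_input, chat_history_state):
    # Show the user message immediately with a "..." placeholder reply.
    history = chat_history_state + [[user_input, "..."]]
    yield history

    final_response_text = "(generated reply)"  # stand-in for the real generation step

    # Replace the placeholder with the finished reply and yield the updated history.
    history[-1][1] = final_response_text
    yield history
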
@@ -469,7 +470,7 @@ def chat_with_bot(user_input, chat_history_state, system_message, max_new_tokens
     # Ensure strict alternation: user, assistant, user, assistant...
     # Only add complete turns from the *past* history (exclude the current incomplete turn)
     # Limit the history length
-    history_to_process = chat_history_state
+    history_to_process = chat_history_state # Use the full history passed to the function initially
 
     # Ensure we only take pairs [user, bot] from past history where bot is NOT the initial placeholder
     # This guarantees that the last message in `recent_complete_turns` corresponds to a *completed* assistant response.
@@ -478,7 +479,7 @@ def chat_with_bot(user_input, chat_history_state, system_message, max_new_tokens
         if turn is not None and len(turn) == 2 and turn[0] is not None and turn[1] is not None and str(turn[1]).strip() != "..."
     ]
 
-    # Take the last MAX_HISTORY_TURNS complete turns
+    # Take the last MAX_HISTORY_TURNS complete turns
     recent_complete_turns = complete_past_turns[max(0, len(complete_past_turns) - MAX_HISTORY_TURNS):]
 
     for user_msg, bot_msg in recent_complete_turns:
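
Together, the two hunks above rebuild the prompt history only from completed [user, bot] pairs (skipping the "..." placeholder) and keep the last MAX_HISTORY_TURNS of them. A condensed sketch of that filtering, assuming MAX_HISTORY_TURNS is a small module-level constant:

MAX_HISTORY_TURNS = 5  # assumed value; app.py defines its own constant

def recent_complete_turns_sketch(chat_history_state):
    # Keep only fully completed [user, bot] turns, then trim to the most recent ones.
    complete_past_turns = [
        turn for turn in chat_history_state
        if turn is not None and len(turn) == 2
        and turn[0] is not None and turn[1] is not None
        and str(turn[1]).strip() != "..."
    ]
    return complete_past_turns[-MAX_HISTORY_TURNS:]
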
@@ -704,7 +705,7 @@ def chat_with_bot(user_input, chat_history_state, system_message, max_new_tokens
                 print(f"Executed date calculation for '{query}'. No specific result.")
             current_tool_results_text += "</perform_date_calculation_results_for_query>\n"
         else:
-            current_tool_results_text += f"<perform_date_calculation_results_for_query query='{query}'><error>Missing 'query' parameter.</error></
+            current_tool_results_text += f"<perform_date_calculation_results_for_query query='{query}'><error>Missing 'query' parameter.</error></lookup_business_info_results_for_query>\n"
             print(f"Skipping date calculation tool call: Missing 'query' parameter.")
 
 
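
Note that the added error line opens a <perform_date_calculation_results_for_query ...> tag but closes it with </lookup_business_info_results_for_query>. If matched tags are wanted, a small helper along these lines (hypothetical, not part of the commit) keeps the opening and closing tags in sync:

def tool_error_block(tag: str, query: str, message: str) -> str:
    # Build an <error> result whose closing tag always matches the opening tag.
    return f"<{tag} query='{query}'><error>{message}</error></{tag}>\n"

# e.g. current_tool_results_text += tool_error_block(
#          "perform_date_calculation_results_for_query", query, "Missing 'query' parameter.")
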
@@ -869,7 +870,7 @@ If Search Results were used, list the relevant URLs under a "Sources:" heading a
                 # Add other fields here...
                 # if contact_person: gathered_info_summary += f" Contact Person: {contact_person.group(1).strip()}\n"
 
-
+            elif "No relevant matches found" in lr_content:
                 score_match = re.search(r"final_best_score='(.*?)'", lr_content) # Look for final_best_score
                 score = float(score_match.group(1)) if score_match else 0.0
                 threshold_match = re.search(r"requested_threshold='(.*?)'", lr_content)
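
The new elif branch handles the "No relevant matches found" case by reading final_best_score and requested_threshold back out of the serialized lookup result. A self-contained sketch of that parsing, with a ValueError guard added as an assumption (the diff converts the score without one):

import re

def parse_lookup_scores(lr_content: str) -> tuple[float, float]:
    # Recover the best similarity score and the requested threshold from the result string.
    score_match = re.search(r"final_best_score='(.*?)'", lr_content)
    threshold_match = re.search(r"requested_threshold='(.*?)'", lr_content)
    try:
        score = float(score_match.group(1)) if score_match else 0.0
        threshold = float(threshold_match.group(1)) if threshold_match else 0.0
    except ValueError:
        score, threshold = 0.0, 0.0  # malformed attribute values fall back to 0.0
    return score, threshold
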
@@ -979,19 +980,27 @@ If Search Results were used, list the relevant URLs under a "Sources:" heading a
 
 
     # Update the last message in chat_history_state with the final response
-    chat_history_state
+    # Find the last turn in the original chat_history_state (which was already updated with the placeholder)
+    if chat_history_state and len(chat_history_state) > 0:
+        chat_history_state[-1][1] = final_response_text # Update the bot's message in the last turn
 
     # Remove the [[TEXT]] marker from the chat_history_state *before* yielding
     # Iterate through the history and clean each item
-
+    cleaned_chat_history_list_of_lists = []
     for user_msg, bot_msg in chat_history_state:
         cleaned_user_msg = user_msg.replace('[[TEXT]] ', '') if isinstance(user_msg, str) else user_msg
         cleaned_bot_msg = bot_msg.replace('[[TEXT]] ', '') if isinstance(bot_msg, str) else bot_msg
-
+        cleaned_chat_history_list_of_lists.append([cleaned_user_msg, cleaned_bot_msg])
+
+    # Convert the cleaned list of lists to a list of tuples as required by Gradio
+    cleaned_chat_history_list_of_tuples = [(user_msg, bot_msg) for user_msg, bot_msg in cleaned_chat_history_list_of_lists]
 
 
     print(f"\n--- Final Response: {final_response_text[:100]}... ---") # Debug Print
-
+
+    # Yield the cleaned history as a list of tuples
+    yield cleaned_chat_history_list_of_tuples # Yield the cleaned history as tuples
+
 
 # ──────────────────────────
 # 3  Gradio interface
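
The added lines do two things before the final yield: overwrite the "..." placeholder with final_response_text, then strip the internal '[[TEXT]] ' marker and convert the history into the list of (user, bot) tuples that the Gradio Chatbot component accepts. A compact equivalent of the cleanup step, written as a sketch:

def clean_history_for_gradio(chat_history_state):
    # Strip the internal '[[TEXT]] ' marker from both sides of every turn and
    # return (user, bot) tuples for the Chatbot component.
    def strip_marker(msg):
        return msg.replace('[[TEXT]] ', '') if isinstance(msg, str) else msg
    return [(strip_marker(u), strip_marker(b)) for u, b in chat_history_state]
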
@@ -1030,4 +1039,4 @@ if __name__ == "__main__":
     # The print statement for RAG status is added here, before launching the demo.
     print(f"RAG functionality available: {business_info_available}")
 
-    demo.launch(debug=True) # Added
+    demo.launch(debug=True) # Added