Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -503,7 +503,7 @@ def chat_with_llm(df,question):
|
|
503 |
def bind_llm(llm, tools, prompt_template):
    """Wire an LLM and a tool set into a ready-to-run AgentExecutor.

    The raw ``prompt_template`` string is promoted to a ChatPromptTemplate
    and passed, together with the bound LLM, to create_tool_calling_agent.

    Args:
        llm: The language model to drive the agent.
        tools: Tools the agent may call.
        prompt_template: Template string for the agent's chat prompt.

    Returns:
        An AgentExecutor running the tool-calling agent with verbose output.
    """
    bound_llm = llm.bind()
    chat_prompt = ChatPromptTemplate.from_template(prompt_template)
    tool_agent = create_tool_calling_agent(bound_llm, tools, chat_prompt)
    return AgentExecutor(agent=tool_agent, tools=tools, verbose=True)
|
508 |
|
509 |
# Define input and output models using Pydantic
|
@@ -697,10 +697,10 @@ def answer_question_thread(user_question, chatbot,audio=None):
|
|
697 |
except sr.RequestError:
|
698 |
user_question = "Could not request results from Google Speech Recognition service."
|
699 |
"""
|
700 |
-
|
701 |
while iterations < max_iterations:
|
702 |
|
703 |
-
response = agent_executor.invoke({"input": user_question}, config={"callbacks": [langfuse_handler]}, early_stopping_method="generate")
|
704 |
#create_file_HF()
|
705 |
if isinstance(response, dict):
|
706 |
response_text = response.get("output", "")
|
@@ -754,13 +754,13 @@ def answer_question_thread(user_question, chatbot,audio=None):
|
|
754 |
return user_question, response_text
|
755 |
else:
|
756 |
if ("max iterations" in response_text):
|
757 |
-
print
|
758 |
-
|
759 |
-
|
760 |
-
|
761 |
-
print(
|
762 |
-
|
763 |
-
|
764 |
return user_question, response_text
|
765 |
# response_text = response_text.replace('\n', ' ').replace(' ', ' ').strip()
|
766 |
# return response_text
|
|
|
503 |
def bind_llm(llm, tools, prompt_template):
    """Wire an LLM and a tool set into a ready-to-run AgentExecutor.

    The raw ``prompt_template`` string is promoted to a ChatPromptTemplate
    and passed, together with the bound LLM, to create_tool_calling_agent.

    Args:
        llm: The language model to drive the agent.
        tools: Tools the agent may call.
        prompt_template: Template string for the agent's chat prompt.

    Returns:
        An AgentExecutor that also records the intermediate
        (action, observation) steps of each run.
    """
    bound_llm = llm.bind()
    chat_prompt = ChatPromptTemplate.from_template(prompt_template)
    tool_agent = create_tool_calling_agent(bound_llm, tools, chat_prompt)
    # return_intermediate_steps=True makes invoke() include the tool-call
    # trace under the "intermediate_steps" key of its result dict.
    return AgentExecutor(
        agent=tool_agent,
        tools=tools,
        verbose=True,
        return_intermediate_steps=True,
    )
|
508 |
|
509 |
# Define input and output models using Pydantic
|
|
|
697 |
except sr.RequestError:
|
698 |
user_question = "Could not request results from Google Speech Recognition service."
|
699 |
"""
|
700 |
+
intermediate_steps = []
|
701 |
while iterations < max_iterations:
|
702 |
|
703 |
+
response, intermediate_steps = agent_executor.invoke({"input": user_question}, config={"callbacks": [langfuse_handler]}, early_stopping_method="generate")
|
704 |
#create_file_HF()
|
705 |
if isinstance(response, dict):
|
706 |
response_text = response.get("output", "")
|
|
|
754 |
return user_question, response_text
|
755 |
else:
|
756 |
if ("max iterations" in response_text):
|
757 |
+
# Loop through and print all intermediate steps
|
758 |
+
for i, (action, observation) in enumerate(intermediate_steps):
|
759 |
+
print(f"\nIteration {i + 1}:")
|
760 |
+
print(f" Action: {action.tool}")
|
761 |
+
print(f" Tool Input: {action.tool_input}")
|
762 |
+
print(f" Observation: {observation}")
|
763 |
+
|
764 |
return user_question, response_text
|
765 |
# response_text = response_text.replace('\n', ' ').replace(' ', ' ').strip()
|
766 |
# return response_text
|