Redmind committed
Commit 4149f35 · verified · 1 Parent(s): 2dec486

Update app.py

Files changed (1): app.py (+10 −10)
app.py CHANGED
@@ -503,7 +503,7 @@ def chat_with_llm(df,question):
 def bind_llm(llm, tools,prompt_template):
     llm = llm.bind()
     agent = create_tool_calling_agent(llm, tools, ChatPromptTemplate.from_template(prompt_template))
-    agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
+    agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, return_intermediate_steps=True)  # Enable intermediate steps tracking
     return agent_executor
 
 # Define input and output models using Pydantic
@@ -697,10 +697,10 @@ def answer_question_thread(user_question, chatbot,audio=None):
     except sr.RequestError:
         user_question = "Could not request results from Google Speech Recognition service."
     """
-
+    intermediate_steps = []
     while iterations < max_iterations:
 
-        response = agent_executor.invoke({"input": user_question}, config={"callbacks": [langfuse_handler]}, early_stopping_method="generate")
+        response, intermediate_steps = agent_executor.invoke({"input": user_question}, config={"callbacks": [langfuse_handler]}, early_stopping_method="generate")
         #create_file_HF()
         if isinstance(response, dict):
             response_text = response.get("output", "")
@@ -754,13 +754,13 @@ def answer_question_thread(user_question, chatbot,audio=None):
             return user_question, response_text
         else:
             if ("max iterations" in response_text):
-                print("11111::")
-                print(agent_executor.agent.return_values)
-                print("1122::")
-                for response_values in agent_executor.agent.return_values:
-                    print(response_values)
-                print("222:::")
-                print(agent_executor.agent.return_stopped_response)
+                # Loop through and print all intermediate steps
+                for i, (action, observation) in enumerate(intermediate_steps):
+                    print(f"\nIteration {i + 1}:")
+                    print(f"  Action: {action.tool}")
+                    print(f"  Tool Input: {action.tool_input}")
+                    print(f"  Observation: {observation}")
+
                 return user_question, response_text
     # response_text = response_text.replace('\n', ' ').replace(' ', ' ').strip()
     # return response_text
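
For context, a minimal sketch (not part of the commit) of how intermediate steps are usually read back from a LangChain AgentExecutor created with return_intermediate_steps=True, as in bind_llm above. Note that invoke() returns a single dict, so the "intermediate_steps" key is read from the result rather than tuple-unpacking the return value; the helper name run_and_trace and its callbacks parameter are illustrative only.

# Sketch only: consume an AgentExecutor built with return_intermediate_steps=True.
def run_and_trace(agent_executor, user_question, callbacks=None):
    # invoke() returns a dict: "output" holds the final answer and
    # "intermediate_steps" holds a list of (AgentAction, observation) tuples.
    result = agent_executor.invoke(
        {"input": user_question},
        config={"callbacks": callbacks or []},
    )
    response_text = result.get("output", "")
    for i, (action, observation) in enumerate(result.get("intermediate_steps", [])):
        print(f"\nIteration {i + 1}:")
        print(f"  Action: {action.tool}")
        print(f"  Tool Input: {action.tool_input}")
        print(f"  Observation: {observation}")
    return response_text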