Redmind committed on
Commit
b46e6d5
·
verified ·
1 Parent(s): 9a9c0b0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -1
app.py CHANGED
@@ -503,7 +503,7 @@ def chat_with_llm(df,question):
503
  def bind_llm(llm, tools,prompt_template):
504
  llm = llm.bind()
505
  agent = create_tool_calling_agent(llm, tools, ChatPromptTemplate.from_template(prompt_template))
506
- agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
507
  return agent_executor
508
 
509
  # Define input and output models using Pydantic
@@ -705,6 +705,13 @@ def answer_question_thread(user_question, chatbot,audio=None):
705
  while iterations < max_iterations:
706
 
707
  response = agent_executor.invoke({"input": user_question}, config={"callbacks": [langfuse_handler]}, early_stopping_method="generate")
 
 
 
 
 
 
 
708
  #create_file_HF()
709
  if isinstance(response, dict):
710
  response_text = response.get("output", "")
 
def bind_llm(llm, tools, prompt_template):
    """Return an AgentExecutor wiring *llm* to *tools* via *prompt_template*.

    The raw prompt_template string is converted into a ChatPromptTemplate,
    a tool-calling agent is created from it, and execution is capped at
    max_iterations=3 to prevent runaway tool-call loops.
    """
    # .bind() with no kwargs is an identity rebind; kept to preserve the
    # original call sequence exactly.
    bound_llm = llm.bind()
    chat_prompt = ChatPromptTemplate.from_template(prompt_template)
    agent = create_tool_calling_agent(bound_llm, tools, chat_prompt)
    return AgentExecutor(
        agent=agent,
        tools=tools,
        verbose=True,
        max_iterations=3,
    )
508
 
509
  # Define input and output models using Pydantic
 
705
  while iterations < max_iterations:
706
 
707
  response = agent_executor.invoke({"input": user_question}, config={"callbacks": [langfuse_handler]}, early_stopping_method="generate")
708
+ # After invoking, check if max iterations are reached
709
+ if agent_executor.iterations >= agent_executor.max_iterations:
710
+ if agent_executor.history:
711
+ return agent_executor.history[-1].get("output") # Extract the last response from the history
712
+
713
+
714
+
715
  #create_file_HF()
716
  if isinstance(response, dict):
717
  response_text = response.get("output", "")