Lasdw committed on
Commit
7cf5031
·
1 Parent(s): bed229a

Added apify search tool and ReAct

Browse files
Files changed (6) hide show
  1. README.md +1 -1
  2. agent.py +494 -199
  3. agent_graph.png +0 -0
  4. agent_langgraph.py +399 -0
  5. app.py +1 -1
  6. requirements.txt +2 -1
README.md CHANGED
@@ -12,4 +12,4 @@ hf_oauth: true
12
  hf_oauth_expiration_minutes: 480
13
  ---
14
 
15
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
12
  hf_oauth_expiration_minutes: 480
13
  ---
14
 
15
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
agent.py CHANGED
@@ -1,7 +1,7 @@
1
  import os
2
- from typing import TypedDict, Annotated
3
  from langgraph.graph.message import add_messages
4
- from langchain_core.messages import AnyMessage, HumanMessage, AIMessage, SystemMessage
5
  from langgraph.prebuilt import ToolNode
6
  from langchain.tools import Tool
7
  from langgraph.graph import START, END, StateGraph
@@ -13,6 +13,14 @@ import subprocess
13
  import tempfile
14
  import time
15
  import random
 
 
 
 
 
 
 
 
16
 
17
 
18
 
@@ -102,58 +110,226 @@ def run_python_code(code: str):
102
  except Exception as e:
103
  return f"Error executing code: {str(e)}"
104
 
105
- # Create the Python code execution tool
106
- code_tool = Tool(
107
- name="python_code",
108
- func=run_python_code,
109
- description="Execute Python code. Provide the complete Python code as a string. The code will be executed and the output will be returned. Use this for calculations, data processing, or any task that can be solved with Python."
110
- )
111
-
112
- # Custom search function with error handling
113
- def safe_web_search(query: str) -> str:
114
- """Search the web safely with error handling and retry logic."""
 
 
 
 
 
 
 
 
 
 
 
115
  try:
116
- # Use the DuckDuckGoSearchRun tool
117
- search_tool = DuckDuckGoSearchRun()
118
- result = search_tool.invoke(query)
 
 
 
 
 
 
 
 
 
119
 
120
- # If we get an empty result, provide a fallback
121
- if not result or len(result.strip()) < 10:
122
- return f"Unable to find specific information about '{query}'. Please try a different search query or check a reliable source like Wikipedia."
 
 
 
 
 
 
 
 
 
 
 
 
 
123
 
124
- return result
125
  except Exception as e:
126
- # Add a small random delay to avoid rate limiting
127
- time.sleep(random.uniform(1, 2))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
128
 
129
- # Return a helpful error message with suggestions
130
- error_msg = f"I encountered an issue while searching for '{query}': {str(e)}. "
131
- return error_msg
132
-
133
- # Create the search tool
134
- search_tool = Tool(
135
- name="web_search",
136
- func=safe_web_search,
137
- description="Search the web for current information. Provide a specific search query."
138
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
139
 
140
  # System prompt to guide the model's behavior
141
- SYSTEM_PROMPT = """You are a genius AI assistant called TurboNerd.
142
- Always provide accurate and helpful responses based on the information you find. You have tools at your disposal to help, use them whenever you can to improve the accuracy of your responses.
 
 
 
 
 
143
 
144
- When you receive an input from the user, break it into smaller parts and address each part systematically:
 
 
145
 
146
- 1. For information retrieval (like finding current data, statistics, etc.), use the web_search tool.
147
- - If the search fails, don't repeatedly attempt identical searches. Provide the best information you have and be honest about limitations.
148
 
149
- 2. For calculations, data processing, or computational tasks, use the python_code tool:
150
- - Write complete, self-contained Python code
151
- - Include print statements for results
152
- - Keep code simple and concise
153
 
 
 
 
 
 
 
154
 
155
- Keep your final answer concise and direct, addressing all parts of the user's question clearly. DO NOT include any other text in your response, just the answer.
156
- """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
157
  #Your response will be evaluated for accuracy and completeness. After you provide an answer, an evaluator will check your work and may ask you to improve it. The evaluation process has a maximum of 3 attempts.
158
 
159
  # Generate the chat interface, including the tools
@@ -163,189 +339,324 @@ llm = ChatOpenAI(
163
  )
164
 
165
  chat = llm
166
- tools = [search_tool, code_tool]
167
- chat_with_tools = chat.bind_tools(tools)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
168
 
169
  # Generate the AgentState and Agent graph
170
- class AgentState(TypedDict):
 
 
 
 
171
  messages: Annotated[list[AnyMessage], add_messages]
 
 
172
 
173
- def assistant(state: AgentState):
174
- # Add system message if it's the first message
175
  print("Assistant Called...\n\n")
176
- print(f"Assistant state keys: {state.keys()}")
177
- print(f"Assistant message count: {len(state['messages'])}")
178
 
179
- if len(state["messages"]) == 1 and isinstance(state["messages"][0], HumanMessage):
180
- messages = [SystemMessage(content=SYSTEM_PROMPT)] + state["messages"]
181
- else:
182
- messages = state["messages"]
183
 
184
- response = chat_with_tools.invoke(messages)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
185
  print(f"Assistant response type: {type(response)}")
186
- if hasattr(response, 'tool_calls') and response.tool_calls:
187
- print(f"Tool calls detected: {len(response.tool_calls)}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
188
 
189
  return {
190
- "messages": [response],
 
 
191
  }
192
 
193
- # Add evaluator function (commented out)
194
- """
195
- def evaluator(state: AgentState):
196
- print("Evaluator Called...\n\n")
197
- print(f"Evaluator state keys: {state.keys()}")
198
- print(f"Evaluator message count: {len(state['messages'])}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
199
 
200
- # Get the current evaluation attempt count or initialize to 0
201
- attempt_count = state.get("evaluation_attempt_count", 0)
202
 
203
- # Create a new evaluator LLM instance
204
- evaluator_llm = ChatOpenAI(
205
- model="gpt-4o-mini",
206
- temperature=0
 
 
 
 
 
 
 
 
 
 
 
207
  )
208
 
209
- # Create evaluation prompt
210
- evaluation_prompt = f\"""You are an evaluator for AI assistant responses. Your job is to:
211
-
212
- 1. Check if the answer is complete and accurate
213
- - Does it address all parts of the user's question?
214
- - Is the information factually correct to the best of your knowledge?
215
-
216
- 2. Identify specific improvements needed, if any
217
- - Be precise about what needs to be fixed
218
-
219
- 3. Return your evaluation in one of these formats:
220
- - "ACCEPT: [brief reason]" if the answer is good enough
221
- - "IMPROVE: [specific instructions]" if improvements are needed
222
-
223
- This is evaluation attempt {attempt_count + 1} out of 3 maximum attempts.
224
-
225
- Acceptance criteria:
226
- - On attempts 1-2: The answer must be complete, accurate, and well-explained
227
- - On attempt 3: Accept the answer if it's reasonably correct, even if not perfect
228
-
229
- Available tools the assistant can use:
230
- - web_search: For retrieving information from the web
231
- - python_code: For executing Python code to perform calculations or data processing
232
 
233
- Be realistic about tool limitations - if a tool is failing repeatedly, don't ask the assistant to keep trying it.
234
- \"""
 
235
 
236
- # Get the last message (the current answer)
237
- last_message = state["messages"][-1]
238
- print(f"Last message to evaluate: {last_message.content}")
239
 
240
- # Create evaluation message
241
- evaluation_message = [
242
- SystemMessage(content=evaluation_prompt),
243
- HumanMessage(content=f"Evaluate this answer: {last_message.content}")
244
- ]
 
 
 
 
 
 
 
 
 
 
245
 
246
- # Get evaluation
247
- evaluation = evaluator_llm.invoke(evaluation_message)
248
- print(f"Evaluation result: {evaluation.content}")
249
 
250
- # Create an AIMessage with the evaluation content
251
- evaluation_ai_message = AIMessage(content=evaluation.content)
 
 
 
 
 
 
 
 
252
 
253
- # Return both the evaluation message and the evaluation result
254
  return {
255
- "messages": state["messages"] + [evaluation_ai_message],
256
- "evaluation_result": evaluation.content,
257
- "evaluation_attempt_count": attempt_count + 1
258
  }
259
- """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
260
 
261
  # Create the graph
262
  def create_agent_graph() -> StateGraph:
263
- """Create the complete agent graph."""
264
  builder = StateGraph(AgentState)
265
 
266
  # Define nodes: these do the work
267
  builder.add_node("assistant", assistant)
268
- builder.add_node("tools", ToolNode(tools))
269
- # builder.add_node("evaluator", evaluator) # Commented out evaluator
270
 
271
  # Define edges: these determine how the control flow moves
272
  builder.add_edge(START, "assistant")
273
 
274
- # First, check if the assistant's output contains tool calls
275
- def debug_tools_condition(state):
276
- # Check if the last message has tool calls
277
- last_message = state["messages"][-1]
278
- print(f"Last message type: {type(last_message)}")
279
-
280
- has_tool_calls = False
281
- if hasattr(last_message, "tool_calls") and last_message.tool_calls:
282
- has_tool_calls = True
283
- print(f"Tool calls found: {last_message.tool_calls}")
284
-
285
- result = "tools" if has_tool_calls else None
286
- print(f"Tools condition result: {result}")
287
- return result
288
-
289
- builder.add_conditional_edges(
290
- "assistant",
291
- debug_tools_condition,
292
- {
293
- "tools": "tools",
294
- None: END # Changed from evaluator to END
295
- }
296
- )
297
 
298
- # Tools always goes back to assistant
299
- builder.add_edge("tools", "assistant")
300
 
301
- # Add evaluation edges with attempt counter (commented out)
302
- """
303
- def evaluation_condition(state: AgentState) -> str:
304
- # Print the state keys to debug
305
- print(f"Evaluation condition state keys: {state.keys()}")
306
-
307
- # Get the evaluation result from the state
308
- evaluation_result = state.get("evaluation_result", "")
309
- print(f"Evaluation result: {evaluation_result}")
310
-
311
- # Get the evaluation attempt count or initialize to 0
312
- attempt_count = state.get("evaluation_attempt_count", 0)
313
-
314
- # Increment the attempt count
315
- attempt_count += 1
316
- print(f"Evaluation attempt: {attempt_count}")
317
-
318
- # If we've reached max attempts or evaluation accepts the answer, end
319
- if attempt_count >= 3 or evaluation_result.startswith("ACCEPT"):
320
- return "end"
321
- else:
322
- return "assistant"
323
 
 
324
  builder.add_conditional_edges(
325
- "evaluator",
326
- evaluation_condition,
327
  {
328
- "end": END,
329
- "assistant": "assistant"
 
330
  }
331
  )
332
- """
 
 
 
333
 
334
  # Compile with a reasonable recursion limit to prevent infinite loops
335
  return builder.compile()
336
 
337
  # Main agent class that integrates with your existing app.py
338
  class TurboNerd:
339
- def __init__(self, max_execution_time=30):
340
  self.graph = create_agent_graph()
341
- self.tools = tools
342
  self.max_execution_time = max_execution_time # Maximum execution time in seconds
 
 
 
 
 
343
 
344
  def __call__(self, question: str) -> str:
345
  """Process a question and return an answer."""
346
  # Initialize the state with the question
347
  initial_state = {
348
- "messages": [HumanMessage(content=question)],
 
 
349
  }
350
 
351
  # Run the graph with timeout
@@ -354,46 +665,30 @@ class TurboNerd:
354
 
355
  try:
356
  # Set a reasonable recursion limit
357
- result = self.graph.invoke(initial_state, config={"recursion_limit": 10})
358
 
359
  # Print the final state for debugging
360
  print(f"Final state keys: {result.keys()}")
361
  print(f"Final message count: {len(result['messages'])}")
362
 
363
- # Extract the final message
364
- final_message = result["messages"][-1]
365
- return final_message.content
366
-
367
- except Exception as e:
368
- elapsed_time = time.time() - start_time
369
- print(f"Error after {elapsed_time:.2f} seconds: {str(e)}")
370
 
371
- # If we've been running too long, return a timeout message
372
- if elapsed_time > self.max_execution_time:
373
- return f"""I wasn't able to complete the full analysis within the time limit, but here's what I found:
374
-
375
- The population of New York City is approximately 8.8 million (as of the 2020 Census).
376
-
377
- For a population doubling at 2% annual growth rate, it would take about 35 years. This can be calculated using the Rule of 70, which states that dividing 70 by the growth rate gives the approximate doubling time:
378
-
379
- 70 ÷ 2 = 35 years
380
-
381
- You can verify this with a Python calculation:
382
- ```python
383
- years = 0
384
- population = 1
385
- while population < 2:
386
- population *= 1.02 # 2% growth
387
- years += 1
388
- print(years) # Result: 35
389
- ```"""
390
 
 
 
391
  # Otherwise return the error
392
  return f"I encountered an error while processing your question: {str(e)}"
393
 
394
  # Example usage:
395
  if __name__ == "__main__":
396
- agent = TurboNerd(max_execution_time=30)
397
- response = agent("What is the population of New York City? Then write a Python program to calculate how many years it would take for the population to double at a 2% annual growth rate.")
398
  print("\nFinal Response:")
399
  print(response)
 
1
  import os
2
+ from typing import TypedDict, Annotated, Dict, Any, Optional, Union, List
3
  from langgraph.graph.message import add_messages
4
+ from langchain_core.messages import AnyMessage, HumanMessage, AIMessage, SystemMessage, ToolMessage
5
  from langgraph.prebuilt import ToolNode
6
  from langchain.tools import Tool
7
  from langgraph.graph import START, END, StateGraph
 
13
  import tempfile
14
  import time
15
  import random
16
+ import json
17
+ import re
18
+ import requests
19
+ from urllib.parse import quote
20
+ import sys
21
+
22
+ from apify_client import ApifyClient
23
+
24
 
25
 
26
 
 
110
  except Exception as e:
111
  return f"Error executing code: {str(e)}"
112
 
113
# Apify-based search function
def apify_google_search(query: str, limit: int = 10) -> str:
    """
    Query Google via Apify's Google Search Results Scraper actor.

    Args:
        query: The search terms to look up.
        limit: How many results to request (10, 20, 30, 40, 50, 100).

    Returns:
        Formatted search results, or the fallback search output when the
        Apify token is missing or the actor call fails.
    """
    # A valid Apify API token is required (sign up at https://apify.com/).
    # It is read from the APIFY_API_TOKEN environment variable.
    token = os.environ.get("APIFY_API_TOKEN", "")
    if not token:
        print("No Apify API token found. Using fallback search method.")
        return fallback_search(query)

    try:
        apify = ApifyClient(token)

        # Actor input — the API expects the result limit as a string.
        actor_input = {
            "keyword": query,
            "limit": str(limit),
            "country": "US"
        }

        # The Actor ID for the Google Search Results Scraper.
        ACTOR_ID = "563JCPLOqM1kMmbbP"

        print(f"Starting Apify search for: '{query}'")

        # Start the actor and block until it finishes (capped at 60s).
        run = apify.actor(ACTOR_ID).call(run_input=actor_input, timeout_secs=60)

        if not run or not run.get("defaultDatasetId"):
            print("Failed to get results from Apify actor")
            return fallback_search(query)

        # Collect every item from the run's default dataset.
        dataset_items = list(apify.dataset(run["defaultDatasetId"]).iterate_items())

        return format_search_results(dataset_items, query)

    except Exception as e:
        print(f"Error using Apify: {str(e)}")
        return fallback_search(query)
168
+
169
def format_search_results(results: List[Dict], query: str) -> str:
    """Format raw Apify search output into a human-readable result list.

    Args:
        results: Items fetched from the Apify dataset — either a list of
            result dicts or a dict with a nested "results" list.
        query: The original search query, echoed in the output header.

    Returns:
        A numbered, readable list of up to 10 results, or an explanatory
        message when nothing usable was found.
    """
    if not results or len(results) == 0:
        return f"No results found for query: {query}"

    print(f"Raw search results: {str(results)[:1000]}...")

    formatted_results = f"Search results for '{query}':\n\n"

    # The actor can return either a bare list of items or a wrapper dict.
    if isinstance(results, dict) and "results" in results:
        items = results["results"]
    elif isinstance(results, list):
        items = results
    else:
        return f"Unable to process results for query: {query}"

    # Handle different Apify result formats.
    if len(items) > 0:
        first_item = items[0]

        # Some actors nest hits under 'organicResults'; others return flat
        # dicts with title/url plus description or snippet.
        if isinstance(first_item, dict) and "organicResults" in first_item:
            organic_results = first_item.get("organicResults", [])
            for i, result in enumerate(organic_results[:10], 1):
                if "title" in result and "url" in result:
                    formatted_results += f"{i}. {result['title']}\n"
                    formatted_results += f"   URL: {result['url']}\n"
                    if "snippet" in result:
                        formatted_results += f"   {result['snippet']}\n"
                    formatted_results += "\n"
        else:
            for i, result in enumerate(items[:10], 1):
                if "title" in result and "url" in result:
                    formatted_results += f"{i}. {result['title']}\n"
                    formatted_results += f"   URL: {result['url']}\n"
                    if "description" in result:
                        formatted_results += f"   {result['description']}\n"
                    elif "snippet" in result:
                        formatted_results += f"   {result['snippet']}\n"
                    formatted_results += "\n"

    # BUG FIX: previously returned str(items), which discarded all of the
    # formatting built above and dumped the raw dataset repr instead.
    return formatted_results
215
+
216
def fallback_search(query: str) -> str:
    """Fallback search method using DuckDuckGo when Apify is not available"""
    try:
        ddg_result = DuckDuckGoSearchRun().invoke(query)
    except Exception as e:
        return f"Search error: {str(e)}. Please try a different query or method."
    return "Observation: " + ddg_result
224
+
225
# Custom search function with improved error handling
def safe_web_search(query: str) -> str:
    """Search the web safely with error handling and retry logic.

    Args:
        query: The search terms; must be non-empty.

    Returns:
        An "Observation: ..."-prefixed result string, or an error message
        when no query was given.
    """
    if not query:
        return "Error: No search query provided. Please specify what you want to search for."

    # Delegate to Apify; it transparently falls back to DuckDuckGo when no
    # API token is configured or the actor call fails.
    # NOTE: a DuckDuckGo retry loop that previously followed this return
    # was unreachable dead code and has been removed.
    return "Observation: " + apify_google_search(query)
266
 
267
  # System prompt to guide the model's behavior
268
+ SYSTEM_PROMPT = """Answer the following questions as best you can. DO NOT rely on your internal knowledge unless web searches are rate-limited or you're specifically instructed to. You have access to the following tools:
269
+
270
+ web_search: Search the web for current information. Provide a specific search query.
271
+ python_code: Execute Python code. Provide the complete Python code as a string. Use this tool to calculate math problems.
272
+
273
+ The way you use the tools is by specifying a json blob.
274
+ Specifically, this json should have an `action` key (with the name of the tool to use) and an `action_input` key (with the input to the tool going here).
275
 
276
+ The only values that should be in the "action" field are:
277
+ web_search: Search the web for current information, args: {"query": {"type": "string"}}
278
+ python_code: Execute Python code, args: {"code": {"type": "string"}}
279
 
280
+ IMPORTANT: Make sure your JSON is properly formatted with double quotes around keys and string values.
 
281
 
282
+ example use:
 
 
 
283
 
284
+ ```json
285
+ {
286
+ "action": "web_search",
287
+ "action_input": {"query": "population of New York City"}
288
+ }
289
+ ```
290
 
291
+ ALWAYS follow this specific format for your responses. Your entire response will follow this pattern:
292
+
293
+ Question: [the user's question]
294
+
295
+ Thought: [your reasoning about what to do next]
296
+
297
+ Action:
298
+ ```json
299
+ {
300
+ "action": "[tool_name]",
301
+ "action_input": {"[parameter_name]": "[parameter_value]"}
302
+ }
303
+ ```
304
+
305
+ Observation: [the result from the tool will appear here]
306
+
307
+ Thought: [your reasoning after seeing the observation]
308
+
309
+ Action:
310
+ ```json
311
+ {
312
+ "action": "[tool_name]",
313
+ "action_input": {"[parameter_name]": "[parameter_value]"}
314
+ }
315
+ ```
316
+
317
+ Observation: [another tool result will appear here]
318
+
319
+ IMPORTANT: You MUST strictly follow the ReAct pattern (Reasoning, Action, Observation):
320
+ 1. First reason about the problem in the "Thought" section
321
+ 2. Then decide what action to take in the "Action" section (using the tools)
322
+ 3. Wait for an observation from the tool
323
+ 4. Based on the observation, continue with another thought
324
+ 5. This cycle repeats until you have enough information to provide a final answer
325
+
326
+ ... (this Thought/Action/Observation cycle can repeat as needed) ...
327
+
328
+ Thought: I now know the final answer
329
+
330
+ Final Answer: Directly answer the question in the shortest possible way. For example, if the question is "What is the capital of France?", the answer should be "Paris" without any additional text. If the question is "What is the population of New York City?", the answer should be "8.4 million" without any additional text.
331
+
332
+ Now begin! Reminder to ALWAYS use the exact characters `Final Answer:` when you provide a definitive answer."""
333
  #Your response will be evaluated for accuracy and completeness. After you provide an answer, an evaluator will check your work and may ask you to improve it. The evaluation process has a maximum of 3 attempts.
334
 
335
  # Generate the chat interface, including the tools
 
339
  )
340
 
341
  chat = llm
342
+ # Tools are defined but not bound to the LLM here
343
+ tools_config = [
344
+ {
345
+ "name": "web_search",
346
+ "description": "Search the web for current information. Provide a specific search query in the format: {\"query\": \"your search query here\"}",
347
+ "func": safe_web_search
348
+ },
349
+ {
350
+ "name": "python_code",
351
+ "description": "Execute Python code. Provide the complete Python code as a string in the format: {\"code\": \"your python code here\"}",
352
+ "func": run_python_code
353
+ }
354
+ ]
355
+
356
+ # Instead of binding tools, we'll handle the JSON parsing ourselves
357
+ # chat_with_tools = chat.bind_tools([Tool(**tool) for tool in tools_config])
358
+ chat_with_tools = chat
359
 
360
  # Generate the AgentState and Agent graph
361
+ class ActionInput(TypedDict, total=False):
362
+ query: Optional[str]
363
+ code: Optional[str]
364
+
365
+ class AgentState(TypedDict, total=False):
366
  messages: Annotated[list[AnyMessage], add_messages]
367
+ current_tool: Optional[str]
368
+ action_input: Optional[ActionInput]
369
 
370
def assistant(state: AgentState) -> Dict[str, Any]:
    """Assistant node that processes messages and decides on next action.

    Re-sends the system prompt on every call, invokes the LLM (stopping at
    "Observation:" so the model never fabricates tool output), and parses a
    ReAct-style JSON action from the response. Returns a state update that
    either routes to a tool node or ends the chain.
    """
    print("Assistant Called...\n\n")

    # Always include the system message at the beginning of the messages
    # list so the LLM follows the ReAct pattern in every call.
    system_msg = SystemMessage(content=SYSTEM_PROMPT)

    # Get user messages from state, leaving out any existing system messages.
    user_messages = [msg for msg in state["messages"] if not isinstance(msg, SystemMessage)]
    messages = [system_msg] + user_messages

    # Log the full context being sent to the LLM.
    print("\n=== INPUT TO LLM ===")
    for i, msg in enumerate(messages):
        msg_type = type(msg).__name__
        # BUG FIX: truncate long content to 150 chars; previously the full
        # content was printed with "..." appended (no slicing).
        content_preview = msg.content[:150] + "..." if len(msg.content) > 150 else msg.content
        print(f"Message {i} ({msg_type}): {content_preview}")
    print("=== END INPUT ===\n")

    # Stop generation at "Observation:" so the model waits for real tool output.
    response = chat_with_tools.invoke(messages, stop=["Observation:"])
    print(f"Assistant response type: {type(response)}")
    print(f"Response content: {response.content}...")

    # Extract the ReAct action JSON from the response text.
    action_json = extract_json_from_text(response.content)
    print(f"Extracted action JSON: {action_json}")

    # Keep the assistant's full response in the message history.
    assistant_message = AIMessage(content=response.content)

    if action_json and "action" in action_json and "action_input" in action_json:
        tool_name = action_json["action"]
        tool_input = action_json["action_input"]
        print(f"Extracted tool: {tool_name}")
        print(f"Tool input: {tool_input}")

        # Synthetic id so downstream nodes can correlate the tool call.
        tool_call_id = f"call_{random.randint(1000000, 9999999)}"

        state_update = {
            "messages": state["messages"] + [assistant_message],
            "current_tool": tool_name,
            "tool_call_id": tool_call_id
        }

        # Only structured (dict) inputs are forwarded as action_input.
        if isinstance(tool_input, dict):
            state_update["action_input"] = tool_input

        return state_update

    # No tool call: either a final answer or plain text — end the chain.
    if "Final Answer:" in response.content:
        print("Final answer detected")

    return {
        "messages": state["messages"] + [assistant_message],
        "current_tool": None,
        "action_input": None
    }
435
 
436
def extract_json_from_text(text: str) -> Optional[dict]:
    """Extract a JSON object from free-form LLM output.

    Handles JSON wrapped in markdown code fences (``` ... ```), optionally
    tagged with a "json" language identifier, and falls back to scanning the
    raw text for flat brace-delimited spans.

    Args:
        text: The assistant's response text.

    Returns:
        The first successfully parsed JSON object, or None when no valid
        JSON could be found.
    """
    try:
        print(f"Attempting to extract JSON from text: {text[:100]}...")

        # Prefer fenced code blocks — the format the system prompt requests.
        if "```" in text:
            print("Found markdown code block")
            lines = text.split('\n')
            blocks = []
            in_block = False
            start_pos = 0

            for i, line in enumerate(lines):
                if "```" in line and not in_block:
                    in_block = True
                    start_pos = i + 1  # content starts on the next line
                elif "```" in line and in_block:
                    in_block = False
                    blocks.append('\n'.join(lines[start_pos:i]))

            for block in blocks:
                block = block.strip()
                print(f"Trying to parse block: {block[:100]}...")
                # Strip an optional "json" language identifier.
                if block.startswith("json"):
                    block = block[4:].strip()
                try:
                    # FIX: was a bare `except:`; only JSON parse failures
                    # should move us on to the next candidate block.
                    return json.loads(block)
                except json.JSONDecodeError:
                    continue

        # Fall back to any flat {...} span in the text (no nested braces).
        json_pattern = r'\{[^{}]*\}'
        for match in re.findall(json_pattern, text, re.DOTALL):
            try:
                return json.loads(match)
            except json.JSONDecodeError:
                continue

        # No valid JSON object found anywhere in the text.
        print("Could not extract valid JSON from text")
        return None

    except Exception as e:
        print(f"Error extracting JSON: {e}")
        return None
489
+
490
def web_search_node(state: AgentState) -> Dict[str, Any]:
    """Node that executes the web search tool."""
    print("Web Search Tool Called...\n\n")

    raw_input = state.get("action_input", {})
    print(f"Web search action_input: {raw_input}")

    # The query may arrive as a dict ({"query": ...}) or as a bare string.
    query = ""
    if isinstance(raw_input, dict):
        query = raw_input.get("query", "")
    elif isinstance(raw_input, str):
        query = raw_input

    print(f"Searching for: '{query}'")

    # Run the search (Apify with DuckDuckGo fallback and retry handling).
    result = safe_web_search(query)
    print(f"Search result: {result}")

    # When rate-limited, tell the model it may answer from its own knowledge.
    if "rate limit" in result.lower() or "ratelimit" in result.lower():
        result += "\n\nNote: You can use your internal knowledge to provide a response since the search is rate limited."

    # Wrap the result as an observation to continue the ReAct cycle.
    tool_message = AIMessage(content=f"Observation: {result}")

    print("\n=== TOOL OBSERVATION ===")
    body = tool_message.content
    print(body[:500] + "..." if len(body) > 500 else body)
    print("=== END OBSERVATION ===\n")

    return {
        "messages": state["messages"] + [tool_message],
        "current_tool": None,   # reset the current tool
        "action_input": None    # clear the action input
    }
 
 
 
 
 
 
 
 
 
 
 
 
535
 
536
def python_code_node(state: AgentState) -> Dict[str, Any]:
    """Node that executes Python code."""
    print("Python Code Tool Called...\n\n")

    raw_input = state.get("action_input", {})
    print(f"Python code action_input: {raw_input}")

    # The code may arrive as a dict ({"code": ...}) or as a bare string.
    code = ""
    if isinstance(raw_input, dict):
        code = raw_input.get("code", "")
    elif isinstance(raw_input, str):
        code = raw_input

    print(f"Executing code: '{code[:100]}...'")

    # Never hand an empty string to the executor.
    if not code:
        result = "Error: No Python code provided. Please provide valid Python code to execute."
    else:
        result = run_python_code(code)

    print(f"Code execution result: {result[:100]}...")

    # Wrap the result as an observation to continue the ReAct cycle.
    tool_message = AIMessage(content=f"Observation: {result}")

    print("\n=== TOOL OBSERVATION ===")
    body = tool_message.content
    print(body[:500] + "..." if len(body) > 500 else body)
    print("=== END OBSERVATION ===\n")

    return {
        "messages": state["messages"] + [tool_message],
        "current_tool": None,   # reset the current tool
        "action_input": None    # clear the action input
    }
579
+
580
# Router function to direct to the correct tool
def router(state: AgentState) -> str:
    """Pick the next node from ``state["current_tool"]``.

    Returns "web_search" or "python_code" when a tool call is pending,
    otherwise "end" to terminate the run.
    """
    tool = state.get("current_tool")
    print(f"Routing to: {tool}")
    print(f"Router received action_input: {state.get('action_input')}")

    # Only two tool nodes exist; anything else means we are done.
    if tool in ("web_search", "python_code"):
        return tool
    return "end"
594
 
595
# Create the graph
def create_agent_graph() -> StateGraph:
    """Build and compile the ReAct agent graph with one node per tool.

    Topology: START -> assistant -> debug -> (web_search | python_code | END);
    each tool node feeds its observation back into the assistant.
    """
    builder = StateGraph(AgentState)

    # Worker nodes.
    builder.add_node("assistant", assistant)
    builder.add_node("web_search", web_search_node)
    builder.add_node("python_code", python_code_node)

    # Every run begins with the assistant.
    builder.add_edge(START, "assistant")

    def debug_state(state):
        """Pass-through node that logs the state between assistant and router."""
        print("\n=== DEBUG STATE ===")
        print(f"State keys: {state.keys()}")
        print(f"Current tool: {state.get('current_tool')}")
        print(f"Action input: {state.get('action_input')}")
        print("=== END DEBUG ===\n")
        return state

    # Checkpoint node between assistant and the router, purely for tracing.
    builder.add_node("debug", debug_state)
    builder.add_edge("assistant", "debug")

    # From debug, the router decides which tool to run (or to stop).
    builder.add_conditional_edges(
        "debug",
        router,
        {
            "web_search": "web_search",
            "python_code": "python_code",
            "end": END,
        },
    )

    # Tool observations always return to the assistant.
    builder.add_edge("web_search", "assistant")
    builder.add_edge("python_code", "assistant")

    # NOTE: the recursion limit is supplied at invoke() time, not at compile().
    return builder.compile()
640
 
641
# Main agent class that integrates with your existing app.py
class TurboNerd:
    """ReAct agent wrapper: builds the tool graph once and answers questions."""

    def __init__(self, max_execution_time=60, apify_api_token=None):
        self.graph = create_agent_graph()
        self.tools = tools_config
        self.max_execution_time = max_execution_time  # maximum execution time in seconds

        # Export the Apify token so Apify-backed tools can authenticate.
        if apify_api_token:
            os.environ["APIFY_API_TOKEN"] = apify_api_token
            print("Apify API token set successfully")

    def __call__(self, question: str) -> str:
        """Process a question and return an answer."""
        # Seed the graph state with the user's question.
        initial_state = {
            "messages": [HumanMessage(content=f"Question: {question}")],
            "current_tool": None,
            "action_input": None,
        }

        try:
            # Cap graph hops so a confused agent cannot loop forever.
            result = self.graph.invoke(initial_state, config={"recursion_limit": 15})

            print(f"Final state keys: {result.keys()}")
            print(f"Final message count: {len(result['messages'])}")

            final_message = result["messages"][-1].content
            print("Final message: ", final_message)

            # Return only the text after the "Final Answer:" marker when present.
            if "Final Answer:" in final_message:
                return final_message.split("Final Answer:")[1].strip()

            return final_message

        except Exception as e:
            print(f"Error processing question: {str(e)}")
            # Otherwise return the error
            return f"I encountered an error while processing your question: {str(e)}"
688
 
689
# Example usage:
if __name__ == "__main__":
    demo_question = (
        "How many studio albums were published by Mercedes Sosa between 2000 "
        "and 2009 (included)? You can use the latest 2022 version of english wikipedia."
    )
    agent = TurboNerd(max_execution_time=60)
    response = agent(demo_question)
    print("\nFinal Response:")
    print(response)
agent_graph.png DELETED
agent_langgraph.py ADDED
@@ -0,0 +1,399 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from typing import TypedDict, Annotated
3
+ from langgraph.graph.message import add_messages
4
+ from langchain_core.messages import AnyMessage, HumanMessage, AIMessage, SystemMessage
5
+ from langgraph.prebuilt import ToolNode
6
+ from langchain.tools import Tool
7
+ from langgraph.graph import START, END, StateGraph
8
+ from langgraph.prebuilt import tools_condition
9
+ from langchain_openai import ChatOpenAI
10
+ from langchain_community.tools import DuckDuckGoSearchRun
11
+ import getpass
12
+ import subprocess
13
+ import tempfile
14
+ import time
15
+ import random
16
+
17
+
18
+
19
def run_python_code(code: str):
    """Execute a Python snippet in a subprocess and return its output.

    The snippet is screened against a denylist of dangerous calls and
    non-whitelisted imports, written to a temporary file, and executed with a
    10-second timeout. If the final line is a bare expression, a
    ``print('Result:', ...)`` wrapper is appended so its value is visible.

    Returns the captured stdout on success, or an "Error: ..." string on
    rejection, failure, or timeout.
    """
    import sys  # local import: only needed to locate the current interpreter

    # Check for potentially dangerous operations.
    dangerous_operations = [
        "os.system", "os.popen", "os.unlink", "os.remove",
        "subprocess.run", "subprocess.call", "subprocess.Popen",
        "shutil.rmtree", "shutil.move", "shutil.copy",
        "open(", "file(", "eval(", "exec(",
        "__import__"
    ]

    # Imports that are considered safe for user snippets.
    safe_imports = {
        "import datetime", "import math", "import random",
        "import statistics", "import collections", "import itertools",
        "import re", "import json", "import csv"
    }

    for dangerous_op in dangerous_operations:
        if dangerous_op in code:
            return f"Error: Code contains potentially unsafe operations: {dangerous_op}"

    # Reject any import not on the safe list.
    for line in code.splitlines():
        line = line.strip()
        if line.startswith("import ") or line.startswith("from "):
            if any(line.startswith(safe_import) for safe_import in safe_imports):
                continue
            return f"Error: Code contains potentially unsafe import: {line}"

    lines = code.splitlines()
    modified_lines = list(lines)

    # Auto-print the trailing line ONLY when it is a bare expression.
    # BUG FIX: the previous version also wrapped statements such as "x = 5",
    # producing "print('Result:', x = 5)" -- a SyntaxError at runtime.
    if lines:
        last_line = lines[-1].strip()
        if last_line and not last_line.startswith("print(") and not last_line.endswith(":"):
            try:
                compile(last_line, "<last-expression>", "eval")
            except SyntaxError:
                pass  # a statement, not an expression: leave it alone
            else:
                modified_lines.append(f"print('Result:', {last_line})")

    modified_code = "\n".join(modified_lines)

    temp_path = None
    try:
        # Write the snippet to a temporary file so it runs in isolation.
        with tempfile.NamedTemporaryFile(suffix='.py', delete=False) as temp:
            temp_path = temp.name
            temp.write(modified_code.encode('utf-8'))

        # BUG FIX: use sys.executable rather than a bare "python" on PATH,
        # which may be missing or point at a different interpreter.
        result = subprocess.run(
            [sys.executable, temp_path],
            capture_output=True,
            text=True,
            timeout=10  # prevent infinite loops
        )

        if result.returncode == 0:
            output = result.stdout.strip()
            if not output:
                return "Code executed successfully with no output."
            return output
        return f"Error executing code: {result.stderr}"
    except subprocess.TimeoutExpired:
        return "Error: Code execution timed out after 10 seconds."
    except Exception as e:
        return f"Error executing code: {str(e)}"
    finally:
        # BUG FIX: always remove the temp file. The previous version leaked it
        # on generic exceptions and raised NameError if tempfile creation failed
        # before temp_path was bound.
        if temp_path is not None and os.path.exists(temp_path):
            os.unlink(temp_path)
104
+
105
# Create the Python code execution tool
# Exposes run_python_code to the agent as a LangChain Tool.
code_tool = Tool(
    name="python_code",
    func=run_python_code,
    description=(
        "Execute Python code. Provide the complete Python code as a string. "
        "The code will be executed and the output will be returned. Use this "
        "for calculations, data processing, or any task that can be solved with Python."
    ),
)
111
+
112
# Custom search function with error handling
def safe_web_search(query: str) -> str:
    """Search the web safely with error handling and retry logic.

    Runs a DuckDuckGo search for ``query``. On failure, backs off briefly and
    retries once. (BUG FIX: the previous version advertised "retry logic" but
    never retried -- it slept *after* the final failure, which only added
    latency.) Always returns a string: either the search results or a
    human-readable fallback/error message.
    """
    last_error = None
    for attempt in range(2):
        try:
            # Fresh tool instance per attempt; avoids shadowing the
            # module-level `search_tool` wrapper.
            searcher = DuckDuckGoSearchRun()
            result = searcher.invoke(query)

            # Empty / near-empty results get a helpful fallback message.
            if not result or len(result.strip()) < 10:
                return f"Unable to find specific information about '{query}'. Please try a different search query or check a reliable source like Wikipedia."

            return result
        except Exception as e:
            last_error = e
            if attempt == 0:
                # Small random delay before the single retry to dodge rate limits.
                time.sleep(random.uniform(1, 2))

    # Both attempts failed: report the last error with context.
    return f"I encountered an issue while searching for '{query}': {str(last_error)}. "
132
+
133
# Create the search tool
# Wraps safe_web_search so the agent can issue web queries.
search_tool = Tool(
    name="web_search",
    func=safe_web_search,
    description="Search the web for current information. Provide a specific search query.",
)
139
+
140
# System prompt to guide the model's behavior
SYSTEM_PROMPT = """You are a genius AI assistant called TurboNerd.
Always provide accurate and helpful responses based on the information you find. You have tools at your disposal to help, use them whenever you can to improve the accuracy of your responses.

When you receive an input from the user, break it into smaller parts and address each part systematically:

1. For information retrieval (like finding current data, statistics, etc.), use the web_search tool.
- If the search fails, don't repeatedly attempt identical searches. Provide the best information you have and be honest about limitations.

2. For calculations, data processing, or computational tasks, use the python_code tool:
- Write complete, self-contained Python code
- Include print statements for results
- Keep code simple and concise


Keep your final answer concise and direct, addressing all parts of the user's question clearly. DO NOT include any other text in your response, just the answer.
"""
# Retained for when the evaluator loop is re-enabled:
# Your response will be evaluated for accuracy and completeness. After you provide an answer, an evaluator will check your work and may ask you to improve it. The evaluation process has a maximum of 3 attempts.
158
+
159
# Generate the chat interface, including the tools
# Temperature 0 keeps the agent's answers deterministic.
llm = ChatOpenAI(
    model="gpt-4o-mini",
    temperature=0,
)

chat = llm
tools = [search_tool, code_tool]
# Bind both tools so the model can emit structured tool calls.
chat_with_tools = chat.bind_tools(tools)
168
+
169
# Generate the AgentState and Agent graph
class AgentState(TypedDict):
    """Graph state: the running conversation, merged via the add_messages reducer."""
    # add_messages appends/merges by message ID instead of overwriting.
    messages: Annotated[list[AnyMessage], add_messages]
172
+
173
def assistant(state: AgentState):
    """LLM node: invoke the tool-bound chat model on the conversation so far.

    On the very first turn the system prompt is prepended; on later turns the
    history is passed through unchanged (it already carries the prompt).
    """
    print("Assistant Called...\n\n")
    print(f"Assistant state keys: {state.keys()}")
    print(f"Assistant message count: {len(state['messages'])}")

    history = state["messages"]
    if len(history) == 1 and isinstance(history[0], HumanMessage):
        prompt = [SystemMessage(content=SYSTEM_PROMPT)] + history
    else:
        prompt = history

    response = chat_with_tools.invoke(prompt)
    print(f"Assistant response type: {type(response)}")
    if hasattr(response, 'tool_calls') and response.tool_calls:
        print(f"Tool calls detected: {len(response.tool_calls)}")

    return {
        "messages": [response],
    }
192
+
193
+ # Add evaluator function (commented out)
194
+ """
195
+ def evaluator(state: AgentState):
196
+ print("Evaluator Called...\n\n")
197
+ print(f"Evaluator state keys: {state.keys()}")
198
+ print(f"Evaluator message count: {len(state['messages'])}")
199
+
200
+ # Get the current evaluation attempt count or initialize to 0
201
+ attempt_count = state.get("evaluation_attempt_count", 0)
202
+
203
+ # Create a new evaluator LLM instance
204
+ evaluator_llm = ChatOpenAI(
205
+ model="gpt-4o-mini",
206
+ temperature=0
207
+ )
208
+
209
+ # Create evaluation prompt
210
+ evaluation_prompt = f\"""You are an evaluator for AI assistant responses. Your job is to:
211
+
212
+ 1. Check if the answer is complete and accurate
213
+ - Does it address all parts of the user's question?
214
+ - Is the information factually correct to the best of your knowledge?
215
+
216
+ 2. Identify specific improvements needed, if any
217
+ - Be precise about what needs to be fixed
218
+
219
+ 3. Return your evaluation in one of these formats:
220
+ - "ACCEPT: [brief reason]" if the answer is good enough
221
+ - "IMPROVE: [specific instructions]" if improvements are needed
222
+
223
+ This is evaluation attempt {attempt_count + 1} out of 3 maximum attempts.
224
+
225
+ Acceptance criteria:
226
+ - On attempts 1-2: The answer must be complete, accurate, and well-explained
227
+ - On attempt 3: Accept the answer if it's reasonably correct, even if not perfect
228
+
229
+ Available tools the assistant can use:
230
+ - web_search: For retrieving information from the web
231
+ - python_code: For executing Python code to perform calculations or data processing
232
+
233
+ Be realistic about tool limitations - if a tool is failing repeatedly, don't ask the assistant to keep trying it.
234
+ \"""
235
+
236
+ # Get the last message (the current answer)
237
+ last_message = state["messages"][-1]
238
+ print(f"Last message to evaluate: {last_message.content}")
239
+
240
+ # Create evaluation message
241
+ evaluation_message = [
242
+ SystemMessage(content=evaluation_prompt),
243
+ HumanMessage(content=f"Evaluate this answer: {last_message.content}")
244
+ ]
245
+
246
+ # Get evaluation
247
+ evaluation = evaluator_llm.invoke(evaluation_message)
248
+ print(f"Evaluation result: {evaluation.content}")
249
+
250
+ # Create an AIMessage with the evaluation content
251
+ evaluation_ai_message = AIMessage(content=evaluation.content)
252
+
253
+ # Return both the evaluation message and the evaluation result
254
+ return {
255
+ "messages": state["messages"] + [evaluation_ai_message],
256
+ "evaluation_result": evaluation.content,
257
+ "evaluation_attempt_count": attempt_count + 1
258
+ }
259
+ """
260
+
261
# Create the graph
def create_agent_graph() -> StateGraph:
    """Build and compile the assistant/tools graph.

    Flow: START -> assistant; when the assistant's reply contains tool calls
    we hop to the ToolNode and back, otherwise the run ends.

    (Cleanup: removed a large block of commented-out evaluator wiring that
    lived inside this function as a dead bare-string expression.)
    """
    builder = StateGraph(AgentState)

    # Define nodes: these do the work.
    builder.add_node("assistant", assistant)
    builder.add_node("tools", ToolNode(tools))

    # Every run starts with the assistant.
    builder.add_edge(START, "assistant")

    def debug_tools_condition(state):
        """Return "tools" if the last message requested a tool call, else None."""
        last_message = state["messages"][-1]
        print(f"Last message type: {type(last_message)}")

        has_tool_calls = False
        if hasattr(last_message, "tool_calls") and last_message.tool_calls:
            has_tool_calls = True
            print(f"Tool calls found: {last_message.tool_calls}")

        result = "tools" if has_tool_calls else None
        print(f"Tools condition result: {result}")
        return result

    builder.add_conditional_edges(
        "assistant",
        debug_tools_condition,
        {
            "tools": "tools",
            None: END,
        },
    )

    # Tool results always flow back to the assistant.
    builder.add_edge("tools", "assistant")

    # NOTE: the recursion limit is applied at invoke() time, not at compile().
    return builder.compile()
336
+
337
# Main agent class that integrates with your existing app.py
class TurboNerd:
    """Agent wrapper around the compiled assistant/tools graph."""

    def __init__(self, max_execution_time=30):
        self.graph = create_agent_graph()
        self.tools = tools
        self.max_execution_time = max_execution_time  # maximum execution time in seconds

    def __call__(self, question: str) -> str:
        """Process a question and return an answer."""
        # Initialize the state with the question.
        initial_state = {
            "messages": [HumanMessage(content=question)],
        }

        print(f"Starting graph execution with question: {question}")
        start_time = time.time()

        try:
            # Cap graph hops so a confused agent cannot loop forever.
            result = self.graph.invoke(initial_state, config={"recursion_limit": 10})

            print(f"Final state keys: {result.keys()}")
            print(f"Final message count: {len(result['messages'])}")

            # The last message holds the assistant's final answer.
            return result["messages"][-1].content

        except Exception as e:
            elapsed_time = time.time() - start_time
            print(f"Error after {elapsed_time:.2f} seconds: {str(e)}")

            # BUG FIX: the previous fallback returned a hard-coded answer about
            # New York City's population regardless of what was actually asked.
            # Report the timeout honestly instead.
            if elapsed_time > self.max_execution_time:
                return (
                    "I wasn't able to complete the analysis within the time limit "
                    f"({self.max_execution_time} seconds). Please try again, or "
                    "simplify the question so it needs fewer tool calls."
                )

            # Otherwise return the error
            return f"I encountered an error while processing your question: {str(e)}"
393
+
394
# Example usage:
if __name__ == "__main__":
    demo_question = (
        "What is the population of New York City? Then write a Python program "
        "to calculate how many years it would take for the population to double "
        "at a 2% annual growth rate."
    )
    agent = TurboNerd(max_execution_time=30)
    response = agent(demo_question)
    print("\nFinal Response:")
    print(response)
app.py CHANGED
@@ -3,7 +3,7 @@ import gradio as gr
3
  import requests
4
  import inspect
5
  import pandas as pd
6
- from agent import TurboNerd
7
 
8
  # (Keep Constants as is)
9
  # --- Constants ---
 
3
  import requests
4
  import inspect
5
  import pandas as pd
6
+ from agent_langgraph import TurboNerd
7
 
8
  # (Keep Constants as is)
9
  # --- Constants ---
requirements.txt CHANGED
@@ -4,4 +4,5 @@ langgraph
4
  langchain
5
  langchain-openai
6
  duckduckgo-search
7
- langchain-community
 
 
4
  langchain
5
  langchain-openai
6
  duckduckgo-search
7
+ langchain-community
8
+ apify-client