josondev committed
Commit 25c1140 · verified · Parent: b5c7fe2

Update veryfinal.py

Files changed (1):
  1. veryfinal.py +314 -154
veryfinal.py CHANGED
@@ -1,83 +1,133 @@
-import os, json, time, random, asyncio
 from dotenv import load_dotenv
-from typing import Optional, Dict, Any

 # Load environment variables
 load_dotenv()

-# Agno imports (corrected based on search results)
-from agno.agent import Agent
-from agno.models.groq import Groq
-from agno.models.google import Gemini
-from agno.tools.yfinance import YFinanceTools

-# Tavily import (replacing DuckDuckGo)
-from tavily import TavilyClient

-# Additional imports for custom tools
-from langchain_community.document_loaders import WikipediaLoader, ArxivLoader

-# Advanced Rate Limiter with exponential backoff (SILENT)
 class AdvancedRateLimiter:
-    def __init__(self, requests_per_minute: int, tokens_per_minute: int = None):
         self.requests_per_minute = requests_per_minute
-        self.tokens_per_minute = tokens_per_minute
         self.request_times = []
-        self.token_usage = []
-        self.consecutive_failures = 0

-    def wait_if_needed(self, estimated_tokens: int = 1000):
         current_time = time.time()
-
         # Clean old requests (older than 1 minute)
         self.request_times = [t for t in self.request_times if current_time - t < 60]
-        self.token_usage = [(t, tokens) for t, tokens in self.token_usage if current_time - t < 60]

-        # Calculate wait time for requests (SILENT)
         if len(self.request_times) >= self.requests_per_minute:
             wait_time = 60 - (current_time - self.request_times[0]) + random.uniform(2, 8)
-            time.sleep(wait_time)  # Changed from asyncio.sleep to time.sleep

         # Record this request
         self.request_times.append(current_time)
-        if self.tokens_per_minute:
-            self.token_usage.append((current_time, estimated_tokens))
-
-    def record_success(self):
-        self.consecutive_failures = 0
-
-    def record_failure(self):
-        self.consecutive_failures += 1

 # Initialize rate limiters for free tiers
-groq_limiter = AdvancedRateLimiter(requests_per_minute=30, tokens_per_minute=6000)
-gemini_limiter = AdvancedRateLimiter(requests_per_minute=2, tokens_per_minute=32000)
 tavily_limiter = AdvancedRateLimiter(requests_per_minute=50)

 # Initialize Tavily client
-tavily_client = TavilyClient(os.getenv("TAVILY_API_KEY"))

-# Custom tool functions - ALL SYNCHRONOUS (SILENT)
 def multiply_tool(a: float, b: float) -> float:
-    """Multiply two numbers."""
     return a * b

 def add_tool(a: float, b: float) -> float:
-    """Add two numbers."""
     return a + b

 def subtract_tool(a: float, b: float) -> float:
-    """Subtract two numbers."""
     return a - b

 def divide_tool(a: float, b: float) -> float:
-    """Divide two numbers."""
     if b == 0:
         raise ValueError("Cannot divide by zero.")
     return a / b

 def tavily_search_tool(query: str) -> str:
-    """Search using Tavily with rate limiting - SYNCHRONOUS."""
     try:
         tavily_limiter.wait_if_needed()
         response = tavily_client.search(
@@ -97,106 +147,231 @@ def tavily_search_tool(query: str) -> str:
     except Exception as e:
         return f"Tavily search failed: {str(e)}"

 def wiki_search_tool(query: str) -> str:
-    """Search Wikipedia with rate limiting - SYNCHRONOUS."""
     try:
-        time.sleep(random.uniform(1, 3))  # Changed from asyncio.sleep
         loader = WikipediaLoader(query=query, load_max_docs=1)
         data = loader.load()
         return "\n\n---\n\n".join([doc.page_content[:1000] for doc in data])
     except Exception as e:
         return f"Wikipedia search failed: {str(e)}"

-def arxiv_search_tool(query: str) -> str:
-    """Search ArXiv with rate limiting - SYNCHRONOUS."""
     try:
-        time.sleep(random.uniform(1, 4))  # Changed from asyncio.sleep
-        search_docs = ArxivLoader(query=query, load_max_docs=2).load()
-        return "\n\n---\n\n".join([doc.page_content[:800] for doc in search_docs])
     except Exception as e:
-        return f"ArXiv search failed: {str(e)}"

-# Create specialized Agno agents (SILENT)
-def create_agno_agents():
-    """Create specialized Agno agents with the best free models"""

-    # Math specialist agent (using Groq for speed)
-    math_agent = Agent(
-        name="Math Specialist",
-        model=Groq(
-            id="llama-3.3-70b-versatile",
-            api_key=os.getenv("GROQ_API_KEY"),
-            temperature=0
-        ),
-        tools=[multiply_tool, add_tool, subtract_tool, divide_tool],
-        instructions=[
-            "You are a mathematical specialist with access to calculation tools.",
-            "Use the appropriate math tools for calculations.",
-            "Show your work step by step.",
-            "Always provide precise numerical answers.",
-            "Finish with: FINAL ANSWER: [numerical result]"
-        ],
-        show_tool_calls=False,  # SILENT
-        markdown=False
-    )

-    # Research specialist agent (using Gemini for capability)
-    research_agent = Agent(
-        name="Research Specialist",
-        model=Gemini(
-            id="gemini-2.0-flash-thinking-exp",
-            api_key=os.getenv("GOOGLE_API_KEY"),
-            temperature=0
-        ),
-        tools=[tavily_search_tool, wiki_search_tool, arxiv_search_tool],  # All synchronous now
-        instructions=[
-            "You are a research specialist with access to multiple search tools.",
-            "Use Tavily search for current web information, Wikipedia for encyclopedic content, and ArXiv for academic papers.",
-            "Always cite sources and provide well-researched answers.",
-            "Synthesize information from multiple sources when possible.",
-            "Finish with: FINAL ANSWER: [your researched answer]"
-        ],
-        show_tool_calls=False,  # SILENT
-        markdown=False
-    )

-    # Coordinator agent (using Groq for fast coordination)
-    coordinator_agent = Agent(
-        name="Coordinator",
-        model=Groq(
-            id="llama-3.3-70b-versatile",
-            api_key=os.getenv("GROQ_API_KEY"),
-            temperature=0
-        ),
-        tools=[tavily_search_tool, wiki_search_tool],  # All synchronous now
-        instructions=[
-            "You are the main coordinator agent.",
-            "Analyze queries and provide comprehensive responses.",
-            "Use Tavily search for current information and Wikipedia for background context.",
-            "Always finish with: FINAL ANSWER: [your final answer]"
-        ],
-        show_tool_calls=False,  # SILENT
-        markdown=False
-    )

-    return {
-        "math": math_agent,
-        "research": research_agent,
-        "coordinator": coordinator_agent
-    }

-# Main Agno multi-agent system (SIMPLIFIED - NO ASYNC)
-class AgnoMultiAgentSystem:
-    """Agno multi-agent system with comprehensive rate limiting"""

     def __init__(self):
-        self.agents = create_agno_agents()
         self.request_count = 0
         self.last_request_time = time.time()

-    def process_query(self, query: str, max_retries: int = 5) -> str:
-        """Process query using Agno agents with rate limiting (SYNCHRONOUS)"""

         # Global rate limiting (SILENT)
         current_time = time.time()
         if current_time - self.last_request_time > 3600:
@@ -207,47 +382,32 @@ class AgnoMultiAgentSystem:

         # Add delay between requests (SILENT)
         if self.request_count > 1:
-            time.sleep(random.uniform(3, 10))  # Changed from asyncio.sleep

-        for attempt in range(max_retries):
-            try:
-                # Route to appropriate agent based on query type (SILENT)
-                if any(word in query.lower() for word in ['calculate', 'math', 'multiply', 'add', 'subtract', 'divide', 'compute']):
-                    response = self.agents["math"].run(query, stream=False)
-
-                elif any(word in query.lower() for word in ['search', 'find', 'research', 'what is', 'who is', 'when', 'where']):
-                    response = self.agents["research"].run(query, stream=False)
-
-                else:
-                    response = self.agents["coordinator"].run(query, stream=False)
-
-                return response.content if hasattr(response, 'content') else str(response)
-
-            except Exception as e:
-                error_msg = str(e).lower()
-
-                if any(keyword in error_msg for keyword in ['rate limit', '429', 'quota', 'too many requests']):
-                    wait_time = (2 ** attempt) + random.uniform(15, 45)
-                    time.sleep(wait_time)  # Changed from asyncio.sleep
-                    continue
-
-                elif attempt == max_retries - 1:
-                    try:
-                        return self.agents["coordinator"].run(f"Answer this as best you can: {query}", stream=False)
-                    except:
-                        return f"Error: {str(e)}"
-
-                else:
-                    wait_time = (2 ** attempt) + random.uniform(2, 8)
-                    time.sleep(wait_time)  # Changed from asyncio.sleep

-        return "Maximum retries exceeded. Please try again later."

-# SIMPLIFIED main function (NO ASYNC)
 def main(query: str) -> str:
-    """Main function using Agno multi-agent system (SYNCHRONOUS)"""
-    agno_system = AgnoMultiAgentSystem()
-    return agno_system.process_query(query)

 def get_final_answer(query: str) -> str:
     """Extract only the FINAL ANSWER from the response"""
@@ -260,6 +420,6 @@ def get_final_answer(query: str) -> str:
     return full_response.strip()

 if __name__ == "__main__":
-    # Test the Agno system - CLEAN OUTPUT ONLY
     result = get_final_answer("What are the names of the US presidents who were assassinated?")
     print(result)

+import os, time, random
 from dotenv import load_dotenv
+from typing import List, Dict, Any, TypedDict, Annotated
+import operator

 # Load environment variables
 load_dotenv()

+# LangGraph imports
+from langgraph.graph import StateGraph, END
+from langgraph.prebuilt import create_react_agent
+from langgraph.checkpoint.memory import MemorySaver

+# LangChain imports
+from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
+from langchain_core.tools import tool
+from langchain_groq import ChatGroq
+from langchain_google_genai import ChatGoogleGenerativeAI
+from langchain_nvidia_ai_endpoints import ChatNVIDIA
+from langchain_core.rate_limiters import InMemoryRateLimiter

+# Tavily import
+from tavily import TavilyClient

+# Advanced Rate Limiter (SILENT)
 class AdvancedRateLimiter:
+    def __init__(self, requests_per_minute: int):
         self.requests_per_minute = requests_per_minute
         self.request_times = []

+    def wait_if_needed(self):
         current_time = time.time()
         # Clean old requests (older than 1 minute)
         self.request_times = [t for t in self.request_times if current_time - t < 60]

+        # Check if we need to wait
         if len(self.request_times) >= self.requests_per_minute:
             wait_time = 60 - (current_time - self.request_times[0]) + random.uniform(2, 8)
+            time.sleep(wait_time)

         # Record this request
         self.request_times.append(current_time)

 # Initialize rate limiters for free tiers
+groq_limiter = AdvancedRateLimiter(requests_per_minute=30)
+gemini_limiter = AdvancedRateLimiter(requests_per_minute=2)
+nvidia_limiter = AdvancedRateLimiter(requests_per_minute=5)  # NVIDIA free tier
 tavily_limiter = AdvancedRateLimiter(requests_per_minute=50)
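
[Reviewer note] The sliding-window limiter above is easy to sanity-check in isolation; a minimal sketch (illustrative values, not part of this commit):

    limiter = AdvancedRateLimiter(requests_per_minute=2)
    for i in range(3):
        limiter.wait_if_needed()  # third call sleeps ~60s minus elapsed time, plus 2-8s jitter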

+# Initialize LangChain rate limiters for NVIDIA
+nvidia_rate_limiter = InMemoryRateLimiter(
+    requests_per_second=0.083,  # 5 requests per minute
+    check_every_n_seconds=0.1,
+    max_bucket_size=5
+)
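
[Reviewer note] The two parameters encode the same 5-requests-per-minute budget: the refill rate 0.083 req/s ≈ 5/60, while max_bucket_size=5 permits an initial burst of up to 5 requests before the refill rate takes over.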

+# Initialize LLMs with best free models
+groq_llm = ChatGroq(
+    model="llama-3.3-70b-versatile",
+    api_key=os.getenv("GROQ_API_KEY"),
+    temperature=0
+)
+
+gemini_llm = ChatGoogleGenerativeAI(
+    model="gemini-2.0-flash-thinking-exp",
+    api_key=os.getenv("GOOGLE_API_KEY"),
+    temperature=0
+)
+
+# Best NVIDIA models based on search results
+nvidia_general_llm = ChatNVIDIA(
+    model="meta/llama3-70b-instruct",  # Best general model from NVIDIA
+    api_key=os.getenv("NVIDIA_API_KEY"),
+    temperature=0,
+    max_tokens=4000,
+    rate_limiter=nvidia_rate_limiter
+)
+
+nvidia_code_llm = ChatNVIDIA(
+    model="meta/codellama-70b",  # Best code generation model from NVIDIA
+    api_key=os.getenv("NVIDIA_API_KEY"),
+    temperature=0,
+    max_tokens=4000,
+    rate_limiter=nvidia_rate_limiter
+)
+
+nvidia_math_llm = ChatNVIDIA(
+    model="mistralai/mixtral-8x22b-instruct-v0.1",  # Best reasoning model from NVIDIA
+    api_key=os.getenv("NVIDIA_API_KEY"),
+    temperature=0,
+    max_tokens=4000,
+    rate_limiter=nvidia_rate_limiter
+)
+
 # Initialize Tavily client
+tavily_client = TavilyClient(api_key=os.getenv("TAVILY_API_KEY"))
+
+# Define State
+class AgentState(TypedDict):
+    messages: Annotated[List[HumanMessage | AIMessage], operator.add]
+    query: str
+    agent_type: str
+    final_answer: str
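
[Reviewer note] With operator.add as the reducer, LangGraph appends whatever a node returns under "messages" to the existing list rather than replacing it; a minimal illustration (hypothetical values, not part of this commit):

    import operator
    merged = operator.add([HumanMessage(content="q")], [AIMessage(content="a")])
    # merged == [HumanMessage("q"), AIMessage("a")]

Since the agent nodes below return state["messages"] + [AIMessage(...)], the reducer appends that full list again, so the stored history may accumulate duplicates; returning only the new message would avoid this.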

+# Custom Tools
+@tool
 def multiply_tool(a: float, b: float) -> float:
+    """Multiply two numbers together"""
     return a * b

+@tool
 def add_tool(a: float, b: float) -> float:
+    """Add two numbers together"""
     return a + b

+@tool
 def subtract_tool(a: float, b: float) -> float:
+    """Subtract two numbers"""
     return a - b

+@tool
 def divide_tool(a: float, b: float) -> float:
+    """Divide two numbers"""
     if b == 0:
         raise ValueError("Cannot divide by zero.")
     return a / b

+@tool
 def tavily_search_tool(query: str) -> str:
+    """Search the web using Tavily for current information"""
     try:
         tavily_limiter.wait_if_needed()
         response = tavily_client.search(
         [... unchanged lines collapsed by the diff viewer ...]
     except Exception as e:
         return f"Tavily search failed: {str(e)}"

+@tool
 def wiki_search_tool(query: str) -> str:
+    """Search Wikipedia for encyclopedic information"""
     try:
+        time.sleep(random.uniform(1, 3))
+        from langchain_community.document_loaders import WikipediaLoader
         loader = WikipediaLoader(query=query, load_max_docs=1)
         data = loader.load()
         return "\n\n---\n\n".join([doc.page_content[:1000] for doc in data])
     except Exception as e:
         return f"Wikipedia search failed: {str(e)}"

+# Define tools for each agent type
+math_tools = [multiply_tool, add_tool, subtract_tool, divide_tool]
+research_tools = [tavily_search_tool, wiki_search_tool]
+coordinator_tools = [tavily_search_tool, wiki_search_tool]
+
+# Node functions
+def router_node(state: AgentState) -> AgentState:
+    """Route queries to appropriate agent type"""
+    query = state["query"].lower()
+
+    if any(word in query for word in ['calculate', 'math', 'multiply', 'add', 'subtract', 'divide', 'compute']):
+        agent_type = "math"
+    elif any(word in query for word in ['code', 'program', 'python', 'javascript', 'function', 'algorithm']):
+        agent_type = "code"
+    elif any(word in query for word in ['search', 'find', 'research', 'what is', 'who is', 'when', 'where']):
+        agent_type = "research"
+    else:
+        agent_type = "coordinator"
+
+    return {**state, "agent_type": agent_type}
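
[Reviewer note] Routing is substring-based, so keywords can match inside unrelated words. Illustrative outcomes for hypothetical queries, given the keyword lists above:

    # "calculate 15 * 23"          -> "math"
    # "write a python function"    -> "code"
    # "what is the speed of light" -> "research"
    # "summarize this paragraph"   -> "coordinator"
    # "what is my address"         -> "math"  ('add' matches inside "address")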

+def math_agent_node(state: AgentState) -> AgentState:
+    """Mathematical specialist agent using NVIDIA Mixtral"""
+    nvidia_limiter.wait_if_needed()
+
+    system_message = SystemMessage(content="""You are a mathematical specialist with access to calculation tools.
+    Use the appropriate math tools for calculations.
+    Show your work step by step.
+    Always provide precise numerical answers.
+    Finish with: FINAL ANSWER: [numerical result]""")
+
+    # Create math agent with NVIDIA's best reasoning model
+    math_agent = create_react_agent(nvidia_math_llm, math_tools)
+
+    # Process query
+    messages = [system_message, HumanMessage(content=state["query"])]
+    config = {"configurable": {"thread_id": "math_thread"}}

     try:
+        result = math_agent.invoke({"messages": messages}, config)
+        final_message = result["messages"][-1].content
+
+        return {
+            **state,
+            "messages": state["messages"] + [AIMessage(content=final_message)],
+            "final_answer": final_message
+        }
     except Exception as e:
+        error_msg = f"Math agent error: {str(e)}"
+        return {
+            **state,
+            "messages": state["messages"] + [AIMessage(content=error_msg)],
+            "final_answer": error_msg
+        }

+def code_agent_node(state: AgentState) -> AgentState:
+    """Code generation specialist agent using NVIDIA CodeLlama"""
+    nvidia_limiter.wait_if_needed()

+    system_message = SystemMessage(content="""You are an expert coding AI specialist.
+    Generate clean, efficient, and well-documented code.
+    Explain your code solutions clearly.
+    Always provide working code examples.
+    Finish with: FINAL ANSWER: [your code solution]""")

+    # Create code agent with NVIDIA's best code model
+    code_agent = create_react_agent(nvidia_code_llm, [])

+    # Process query
+    messages = [system_message, HumanMessage(content=state["query"])]
+    config = {"configurable": {"thread_id": "code_thread"}}

+    try:
+        result = code_agent.invoke({"messages": messages}, config)
+        final_message = result["messages"][-1].content
+
+        return {
+            **state,
+            "messages": state["messages"] + [AIMessage(content=final_message)],
+            "final_answer": final_message
+        }
+    except Exception as e:
+        error_msg = f"Code agent error: {str(e)}"
+        return {
+            **state,
+            "messages": state["messages"] + [AIMessage(content=error_msg)],
+            "final_answer": error_msg
+        }

+def research_agent_node(state: AgentState) -> AgentState:
+    """Research specialist agent using Gemini"""
+    gemini_limiter.wait_if_needed()

+    system_message = SystemMessage(content="""You are a research specialist with access to web search and Wikipedia.
+    Use appropriate search tools to gather comprehensive information.
+    Always cite sources and provide well-researched answers.
+    Synthesize information from multiple sources when possible.
+    Finish with: FINAL ANSWER: [your researched answer]""")
+
+    # Create research agent
+    research_agent = create_react_agent(gemini_llm, research_tools)
+
+    # Process query
+    messages = [system_message, HumanMessage(content=state["query"])]
+    config = {"configurable": {"thread_id": "research_thread"}}
+
+    try:
+        result = research_agent.invoke({"messages": messages}, config)
+        final_message = result["messages"][-1].content
+
+        return {
+            **state,
+            "messages": state["messages"] + [AIMessage(content=final_message)],
+            "final_answer": final_message
+        }
+    except Exception as e:
+        error_msg = f"Research agent error: {str(e)}"
+        return {
+            **state,
+            "messages": state["messages"] + [AIMessage(content=error_msg)],
+            "final_answer": error_msg
+        }
+
+def coordinator_agent_node(state: AgentState) -> AgentState:
+    """Coordinator agent using NVIDIA Llama3"""
+    nvidia_limiter.wait_if_needed()
+
+    system_message = SystemMessage(content="""You are the main coordinator agent.
+    Analyze queries and provide comprehensive responses.
+    Use search tools for factual information when needed.
+    Always finish with: FINAL ANSWER: [your final answer]""")
+
+    # Create coordinator agent with NVIDIA's best general model
+    coordinator_agent = create_react_agent(nvidia_general_llm, coordinator_tools)
+
+    # Process query
+    messages = [system_message, HumanMessage(content=state["query"])]
+    config = {"configurable": {"thread_id": "coordinator_thread"}}
+
+    try:
+        result = coordinator_agent.invoke({"messages": messages}, config)
+        final_message = result["messages"][-1].content
+
+        return {
+            **state,
+            "messages": state["messages"] + [AIMessage(content=final_message)],
+            "final_answer": final_message
+        }
+    except Exception as e:
+        error_msg = f"Coordinator agent error: {str(e)}"
+        return {
+            **state,
+            "messages": state["messages"] + [AIMessage(content=error_msg)],
+            "final_answer": error_msg
+        }
+
+# Conditional routing function
+def route_agent(state: AgentState) -> str:
+    """Route to appropriate agent based on agent_type"""
+    agent_type = state.get("agent_type", "coordinator")
+
+    if agent_type == "math":
+        return "math_agent"
+    elif agent_type == "code":
+        return "code_agent"
+    elif agent_type == "research":
+        return "research_agent"
+    else:
+        return "coordinator_agent"
+
+# LangGraph Multi-Agent System
+class LangGraphMultiAgentSystem:
     def __init__(self):
         self.request_count = 0
         self.last_request_time = time.time()
+        self.graph = self._create_graph()

+    def _create_graph(self) -> StateGraph:
+        """Create the LangGraph workflow"""
+        workflow = StateGraph(AgentState)

+        # Add nodes
+        workflow.add_node("router", router_node)
+        workflow.add_node("math_agent", math_agent_node)
+        workflow.add_node("code_agent", code_agent_node)
+        workflow.add_node("research_agent", research_agent_node)
+        workflow.add_node("coordinator_agent", coordinator_agent_node)
+
+        # Add edges
+        workflow.set_entry_point("router")
+        workflow.add_conditional_edges(
+            "router",
+            route_agent,
+            {
+                "math_agent": "math_agent",
+                "code_agent": "code_agent",
+                "research_agent": "research_agent",
+                "coordinator_agent": "coordinator_agent"
+            }
+        )
+
+        # All agents end the workflow
+        workflow.add_edge("math_agent", END)
+        workflow.add_edge("code_agent", END)
+        workflow.add_edge("research_agent", END)
+        workflow.add_edge("coordinator_agent", END)
+
+        # Compile the graph
+        memory = MemorySaver()
+        return workflow.compile(checkpointer=memory)
+
+    def process_query(self, query: str) -> str:
+        """Process query using LangGraph multi-agent system"""
         # Global rate limiting (SILENT)
         current_time = time.time()
         if current_time - self.last_request_time > 3600:
         [... unchanged lines collapsed by the diff viewer ...]

         # Add delay between requests (SILENT)
         if self.request_count > 1:
+            time.sleep(random.uniform(3, 10))
+
+        # Initial state
+        initial_state = {
+            "messages": [HumanMessage(content=query)],
+            "query": query,
+            "agent_type": "",
+            "final_answer": ""
+        }

+        # Configuration for the graph
+        config = {"configurable": {"thread_id": f"thread_{self.request_count}"}}

+        try:
+            # Run the graph
+            final_state = self.graph.invoke(initial_state, config)
+            return final_state.get("final_answer", "No response generated")
+
+        except Exception as e:
+            return f"Error: {str(e)}"
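
[Reviewer note] The compiled graph is a single fan-out: router -> {math_agent | code_agent | research_agent | coordinator_agent} -> END, checkpointed by MemorySaver per thread_id. A minimal smoke test (assumes the GROQ/GOOGLE/NVIDIA/TAVILY keys are set in .env):

    system = LangGraphMultiAgentSystem()
    print(system.process_query("calculate 15 * 23"))  # expected to end with FINAL ANSWER: 345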

+# Main functions
 def main(query: str) -> str:
+    """Main function using LangGraph multi-agent system"""
+    langgraph_system = LangGraphMultiAgentSystem()
+    return langgraph_system.process_query(query)

 def get_final_answer(query: str) -> str:
     """Extract only the FINAL ANSWER from the response"""
     [... unchanged lines collapsed by the diff viewer ...]
     return full_response.strip()

 if __name__ == "__main__":
+    # Test the LangGraph system - CLEAN OUTPUT ONLY
     result = get_final_answer("What are the names of the US presidents who were assassinated?")
     print(result)