Update veryfinal.py
veryfinal.py  +278 −463
veryfinal.py CHANGED
@@ -1,6 +1,6 @@
 """
-
-
 """
 
 import os
@@ -9,518 +9,333 @@ import random
 import operator
 from typing import List, Dict, Any, TypedDict, Annotated, Optional
 from dotenv import load_dotenv
-from datetime import datetime
-from textwrap import dedent
 
-# Core LangChain imports
 from langchain_core.tools import tool
-from
 from langgraph.graph import StateGraph, END
 from langgraph.checkpoint.memory import MemorySaver
-
-
-try:
-    from agno.agent import Agent
-    from agno.models.groq import Groq
-    from agno.models.ollama import Ollama
-    from agno.models.together import Together
-    from agno.models.anyscale import Anyscale
-    from agno.models.huggingface import HuggingFaceChat
-    from agno.models.nvidia import Nvidia  # NVIDIA NIM integration
-    from agno.tools.duckduckgo import DuckDuckGoTools
-    from agno.tools.wikipedia import WikipediaTools
-    from agno.tools.calculator import Calculator
-    from agno.tools.reasoning import ReasoningTools
-    from agno.memory import AgentMemory
-    from agno.storage import AgentStorage
-    from agno.knowledge import AgentKnowledge
-    AGNO_AVAILABLE = True
-except ImportError:
-    AGNO_AVAILABLE = False
-    print("Agno not available. Install with: pip install agno")
-
-# Vector database imports
-import faiss
-import numpy as np
-from sentence_transformers import SentenceTransformer
-import json
 
 load_dotenv()
 
-#
-
-You are a helpful assistant tasked with answering questions using available tools.
-You must provide accurate, comprehensive answers based on available information.
-
-Your capabilities include:
-- Using search tools to find current information
-- Performing mathematical calculations
-- Reasoning through complex problems step by step
-- Accessing Wikipedia for encyclopedic knowledge
 
 Guidelines:
 1. Use available tools to gather information when needed
 2. Provide precise, factual answers
 3. For numbers: don't use commas or units unless specified
 4. For strings: don't use articles or abbreviations, write digits in plain text
-5.
-6.
-7.
-
-
 
-
-
-"""
 
     def __init__(self):
-        self.
-        self.
-
-
-
-
-
-
-
-
-
-
-
-
-                self.available_models['nvidia_mixtral'] = Nvidia(id="mistralai/mixtral-8x7b-instruct-v0.1")
-                self.available_models['nvidia_codellama'] = Nvidia(id="meta/codellama-70b-instruct")
-                self.available_models['nvidia_gemma'] = Nvidia(id="google/gemma-7b-it")
-                self.available_models['nvidia_yi'] = Nvidia(id="01-ai/yi-34b-chat")
-                print("NVIDIA NIM models initialized")
-            except Exception as e:
-                print(f"NVIDIA models not available: {e}")
 
-
-
        try:
-
-
-
-
-
        except Exception as e:
-
-
-
-
-
-            self.available_models['ollama_llama3_70b'] = Ollama(id="llama3:70b")
-            self.available_models['ollama_mistral'] = Ollama(id="mistral")
-            self.available_models['ollama_phi3'] = Ollama(id="phi3")
-            self.available_models['ollama_codellama'] = Ollama(id="codellama")
-            self.available_models['ollama_gemma'] = Ollama(id="gemma")
-            self.available_models['ollama_qwen'] = Ollama(id="qwen")
-            print("Ollama local models initialized")
-        except Exception as e:
-            print(f"Ollama models not available: {e}")
-
-        # 4. Together AI (Open-source models)
-        if os.getenv("TOGETHER_API_KEY"):
            try:
-
-
-
-
-
            except Exception as e:
-
-
-
-
        try:
-
-
-
        except Exception as e:
-
-
-        # 6. Hugging Face (Open-source models)
-        try:
-            if os.getenv("HUGGINGFACE_API_KEY"):
-                self.available_models['hf_llama3_8b'] = HuggingFaceChat(id="meta-llama/Meta-Llama-3-8B-Instruct")
-                self.available_models['hf_mistral'] = HuggingFaceChat(id="mistralai/Mistral-7B-Instruct-v0.1")
-                print("Hugging Face open-source models initialized")
-        except Exception as e:
-            print(f"Hugging Face models not available: {e}")
-
-        print(f"Total available models: {len(self.available_models)}")
-
-    def get_model(self, model_name: str):
-        """Get a specific model by name"""
-        return self.available_models.get(model_name)
-
-    def list_available_models(self) -> List[str]:
-        """List all available model names"""
-        return list(self.available_models.keys())
-
-    def get_best_model_for_task(self, task_type: str):
-        """Get the best available model for a specific task type"""
-        if task_type == "reasoning":
-            # Prefer larger, more capable models for reasoning
-            for model_name in ['nvidia_llama3_70b', 'groq_llama3_70b', 'together_llama3_70b', 'anyscale_llama3_70b', 'ollama_llama3_70b']:
-                if model_name in self.available_models:
-                    return self.available_models[model_name]
-
-        elif task_type == "coding":
-            # Prefer code-specialized models
-            for model_name in ['nvidia_codellama', 'ollama_codellama', 'nvidia_llama3_70b', 'groq_llama3_70b']:
-                if model_name in self.available_models:
-                    return self.available_models[model_name]
-
-        elif task_type == "fast":
-            # Prefer fast, smaller models
-            for model_name in ['groq_llama3_8b', 'nvidia_llama3_8b', 'groq_gemma', 'ollama_phi3', 'hf_llama3_8b']:
-                if model_name in self.available_models:
-                    return self.available_models[model_name]
-
-        elif task_type == "enterprise":
-            # Prefer NVIDIA NIM for enterprise-grade tasks
-            for model_name in ['nvidia_llama3_70b', 'nvidia_mixtral', 'nvidia_codellama']:
-                if model_name in self.available_models:
-                    return self.available_models[model_name]
-
-        # Default fallback to first available
-        if self.available_models:
-            return list(self.available_models.values())[0]
-        return None
 
-
-
-
-
-
-        self.model_manager = AgnoEnhancedModelManager()
-        self.agents = {}
-        self._create_specialized_agents()
-
-    def _create_specialized_agents(self):
-        """Create specialized agents for different tasks using best available models"""
-        if not AGNO_AVAILABLE:
-            print("Agno not available, agents cannot be created")
-            return
-
-        # Enterprise Research Agent (NVIDIA preferred)
-        enterprise_model = self.model_manager.get_best_model_for_task("enterprise")
-        if enterprise_model:
-            self.agents['enterprise_research'] = Agent(
-                model=enterprise_model,
-                tools=[DuckDuckGoTools(), WikipediaTools(), ReasoningTools()],
-                description=dedent("""\
-                    You are an enterprise-grade research specialist with access to optimized models.
-                    Your expertise lies in comprehensive analysis, fact-checking, and providing
-                    detailed, accurate responses for complex research tasks.
 
-
-
-                    - Comprehensive and thorough analysis
-                    - Multi-source verification
-                    - Professional-grade output quality
-                """),
-                instructions=dedent("""\
-                    1. Use advanced reasoning capabilities for complex analysis
-                    2. Cross-reference multiple sources for maximum accuracy
-                    3. Provide comprehensive, well-structured responses
-                    4. Include confidence levels and source reliability assessment
-                    5. Always end with 'FINAL ANSWER: [your comprehensive answer]'
-                    6. Prioritize accuracy and completeness over speed
-                """),
-                memory=AgentMemory(),
-                markdown=True,
-                show_tool_calls=True,
-                add_datetime_to_instructions=True
-            )
-
-        # Advanced Math Agent (Best reasoning model)
-        math_model = self.model_manager.get_best_model_for_task("reasoning")
-        if math_model:
-            self.agents['advanced_math'] = Agent(
-                model=math_model,
-                tools=[Calculator(), ReasoningTools()],
-                description=dedent("""\
-                    You are an advanced mathematics expert with access to powerful reasoning models.
-                    You excel at complex mathematical problem solving, statistical analysis,
-                    and providing step-by-step solutions with high accuracy.
 
-
-
-                    - Step-by-step problem decomposition
-                    - High-precision calculations
-                    - Clear mathematical communication
-                """),
-                instructions=dedent("""\
-                    1. Break down complex mathematical problems systematically
-                    2. Use advanced reasoning for multi-step problems
-                    3. Show detailed work and methodology
-                    4. Verify calculations using multiple approaches when possible
-                    5. Provide exact numerical answers without commas or units unless specified
-                    6. Always end with 'FINAL ANSWER: [precise numerical result]'
-                """),
-                memory=AgentMemory(),
-                markdown=True,
-                show_tool_calls=True
-            )
-
-        # Fast Response Agent (Optimized for speed)
-        fast_model = self.model_manager.get_best_model_for_task("fast")
-        if fast_model:
-            self.agents['fast_response'] = Agent(
-                model=fast_model,
-                tools=[DuckDuckGoTools(), WikipediaTools()],
-                description=dedent("""\
-                    You are a rapid response specialist optimized for quick, accurate answers.
-                    You provide concise, direct responses while maintaining high quality standards.
 
-
-
-                    - Direct and concise communication
-                    - Efficient tool usage
-                    - Quality maintained at high speed
-                """),
-                instructions=dedent("""\
-                    1. Provide quick, accurate answers
-                    2. Use tools efficiently - only when necessary
-                    3. Be direct and avoid unnecessary elaboration
-                    4. Maintain accuracy despite speed focus
-                    5. Always end with 'FINAL ANSWER: [your concise answer]'
-                    6. Prioritize clarity and correctness
-                """),
-                markdown=True,
-                show_tool_calls=False
-            )
-
-        # Advanced Coding Agent (Code-specialized model)
-        coding_model = self.model_manager.get_best_model_for_task("coding")
-        if coding_model:
-            self.agents['advanced_coding'] = Agent(
-                model=coding_model,
-                tools=[ReasoningTools()],
-                description=dedent("""\
-                    You are an advanced programming expert with access to code-specialized models.
-                    You excel at complex code generation, algorithm design, debugging, and
-                    software architecture recommendations.
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            )
-
-        # Standard Research Agent (Fallback)
-        research_model = self.model_manager.get_best_model_for_task("reasoning")
-        if research_model and 'enterprise_research' not in self.agents:
-            self.agents['research'] = Agent(
-                model=research_model,
-                tools=[DuckDuckGoTools(), WikipediaTools(), ReasoningTools()],
-                description=dedent("""\
-                    You are a research specialist with expertise in finding and analyzing information.
-                    Your specialty lies in gathering comprehensive data from multiple sources.
-                """),
-                instructions=dedent("""\
-                    1. Use search tools to find current and relevant information
-                    2. Apply systematic reasoning to analyze findings
-                    3. Provide comprehensive answers with sources
-                    4. Always end with 'FINAL ANSWER: [your answer]'
-                """),
-                memory=AgentMemory(),
-                markdown=True,
-                show_tool_calls=True
-            )
-
-        print(f"Created {len(self.agents)} specialized Agno agents with enhanced models")
-
-    def route_query(self, query: str) -> str:
-        """Route query to the most appropriate agent"""
-        q_lower = query.lower()
-
-        # Route to specialized agents
-        if any(keyword in q_lower for keyword in ["calculate", "math", "multiply", "add", "subtract", "divide", "compute", "statistical"]):
-            if 'advanced_math' in self.agents:
-                return self._query_agent('advanced_math', query)
-            elif 'math' in self.agents:
-                return self._query_agent('math', query)
-
-        elif any(keyword in q_lower for keyword in ["code", "programming", "function", "algorithm", "python", "javascript", "debug"]):
-            if 'advanced_coding' in self.agents:
-                return self._query_agent('advanced_coding', query)
-            elif 'coding' in self.agents:
-                return self._query_agent('coding', query)
-
-        elif any(keyword in q_lower for keyword in ["enterprise", "analysis", "comprehensive", "detailed", "professional"]):
-            if 'enterprise_research' in self.agents:
-                return self._query_agent('enterprise_research', query)
-
-        elif any(keyword in q_lower for keyword in ["research", "find", "search", "information", "study", "analyze"]):
-            if 'enterprise_research' in self.agents:
-                return self._query_agent('enterprise_research', query)
-            elif 'research' in self.agents:
-                return self._query_agent('research', query)
 
-
-
-
-
 
-
-
-
-
-
-
-
 
-        return "No agents available"
-
-    def _query_agent(self, agent_name: str, query: str) -> str:
-        """Query a specific agent"""
        try:
-
-
 
-            #
-            if
-                return
 
-            return
        except Exception as e:
-            return f"Error
-
-    def get_system_info(self) -> Dict[str, Any]:
-        """Get information about available agents and models"""
-        model_breakdown = {
-            "nvidia_models": [m for m in self.model_manager.list_available_models() if m.startswith("nvidia_")],
-            "groq_models": [m for m in self.model_manager.list_available_models() if m.startswith("groq_")],
-            "ollama_models": [m for m in self.model_manager.list_available_models() if m.startswith("ollama_")],
-            "together_models": [m for m in self.model_manager.list_available_models() if m.startswith("together_")],
-            "anyscale_models": [m for m in self.model_manager.list_available_models() if m.startswith("anyscale_")],
-            "hf_models": [m for m in self.model_manager.list_available_models() if m.startswith("hf_")]
-        }
-
-        return {
-            "available_models": self.model_manager.list_available_models(),
-            "model_breakdown": model_breakdown,
-            "active_agents": list(self.agents.keys()),
-            "agno_available": AGNO_AVAILABLE,
-            "total_models": len(self.model_manager.available_models),
-            "nvidia_available": len(model_breakdown["nvidia_models"]) > 0
-        }
-
-# ---- Enhanced Agent State for LangGraph compatibility ----
-class EnhancedAgentState(TypedDict):
-    """State structure for compatibility with existing system."""
-    messages: Annotated[List[HumanMessage | AIMessage], operator.add]
-    query: str
-    agent_type: str
-    final_answer: str
-    perf: Dict[str, Any]
-    tools_used: List[str]
-    reasoning: str
-    model_used: str
 
-# ----
 class UnifiedAgnoEnhancedSystem:
-    """
 
     def __init__(self):
-
-
-
-
-        else:
-            print("Agno not available")
-            self.agno_system = None
-            self.graph = None
-
-    def _build_compatibility_graph(self):
-        """Build LangGraph for compatibility with existing app.py"""
-        def process_node(state: EnhancedAgentState) -> EnhancedAgentState:
-            """Process query through enhanced Agno system"""
-            query = state.get("query", "")
-
-            if self.agno_system:
-                answer = self.agno_system.route_query(query)
-                return {**state, "final_answer": answer}
-            else:
-                return {**state, "final_answer": "Enhanced Agno system not available"}
-
-        g = StateGraph(EnhancedAgentState)
-        g.add_node("process", process_node)
-        g.set_entry_point("process")
-        g.add_edge("process", END)
-
-        return g.compile(checkpointer=MemorySaver())
 
     def process_query(self, query: str) -> str:
-
-        if self.agno_system:
-            return self.agno_system.route_query(query)
-        else:
-            return "Enhanced Agno system not available"
 
     def get_system_info(self) -> Dict[str, Any]:
-
-
-
-
-
 
-    #
-
-
-
-
 
-# ---- Main execution ----
 if __name__ == "__main__":
-    #
-    system =
-
-    # Print system information
-    info = system.get_system_info()
-    print("Enhanced Agno System Information:")
-    for key, value in info.items():
-        if isinstance(value, dict):
-            print(f"  {key}:")
-            for subkey, subvalue in value.items():
-                print(f"    {subkey}: {subvalue}")
-        else:
-            print(f"  {key}: {value}")
 
-    # Test queries
    test_questions = [
-        "
-        "
-        "
-        "Find comprehensive information about Mercedes Sosa albums between 2000-2009",
-        "Quick answer: What is the capital of France?"
    ]
 
-    print("
-    print("Testing Enhanced Agno Multi-LLM System with NVIDIA")
-    print("="*60)
-
    for i, question in enumerate(test_questions, 1):
        print(f"\nQuestion {i}: {question}")
-        print("-" * 50)
        answer = system.process_query(question)
        print(f"Answer: {answer}")
 """
+Final Working Multi-LLM Agent System
+Robust fallback system that works even when Agno fails
 """
 
 import os
 
 import operator
 from typing import List, Dict, Any, TypedDict, Annotated, Optional
 from dotenv import load_dotenv
 
+# Core LangChain imports
 from langchain_core.tools import tool
+from langchain_community.tools.tavily_search import TavilySearchResults
+from langchain_community.document_loaders import WikipediaLoader
 from langgraph.graph import StateGraph, END
 from langgraph.checkpoint.memory import MemorySaver
+from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
+from langchain_groq import ChatGroq
 
 load_dotenv()
 
+# System prompt for proper question answering
+SYSTEM_PROMPT = """You are a helpful assistant tasked with answering questions using available tools.
 
 Guidelines:
 1. Use available tools to gather information when needed
 2. Provide precise, factual answers
 3. For numbers: don't use commas or units unless specified
 4. For strings: don't use articles or abbreviations, write digits in plain text
+5. Always end with 'FINAL ANSWER: [YOUR ANSWER]'
+6. Be concise but thorough
+7. If you cannot find the answer, state that clearly"""
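
Every node in the new graph relies on this answer contract and strips the marker with the same split. A minimal sketch of that extraction idiom, written as a hypothetical helper for illustration (the file itself inlines the split in each node):

    def extract_final_answer(text: str) -> str:
        # Hypothetical helper: keep only what follows the marker; fall back to the raw text.
        if "FINAL ANSWER:" in text:
            return text.split("FINAL ANSWER:")[-1].strip()
        return text.strip()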
+
+# ---- Tool Definitions ----
+@tool
+def multiply(a: int, b: int) -> int:
+    """Multiply two integers and return the product."""
+    return a * b
+
+@tool
+def add(a: int, b: int) -> int:
+    """Add two integers and return the sum."""
+    return a + b
+
+@tool
+def subtract(a: int, b: int) -> int:
+    """Subtract the second integer from the first and return the difference."""
+    return a - b
+
+@tool
+def divide(a: int, b: int) -> float:
+    """Divide the first integer by the second and return the quotient."""
+    if b == 0:
+        raise ValueError("Cannot divide by zero.")
+    return a / b
 
+@tool
+def modulus(a: int, b: int) -> int:
+    """Return the remainder when dividing the first integer by the second."""
+    return a % b
+
+@tool
+def web_search(query: str) -> str:
+    """Search the web for information."""
+    try:
+        if os.getenv("TAVILY_API_KEY"):
+            time.sleep(random.uniform(0.5, 1.0))
+            search_tool = TavilySearchResults(max_results=3)
+            docs = search_tool.invoke({"query": query})
+            return "\n\n---\n\n".join(
+                f"<Doc url='{d.get('url','')}'>{d.get('content','')[:600]}</Doc>"
+                for d in docs
+            )
+        else:
+            return "Web search not available - no API key"
+    except Exception as e:
+        return f"Web search failed: {e}"
+
+@tool
+def wiki_search(query: str) -> str:
+    """Search Wikipedia for information."""
+    try:
+        time.sleep(random.uniform(0.3, 0.8))
+        docs = WikipediaLoader(query=query, load_max_docs=2).load()
+        return "\n\n---\n\n".join(
+            f"<Doc src='Wikipedia'>{d.page_content[:800]}</Doc>"
+            for d in docs
+        )
+    except Exception as e:
+        return f"Wikipedia search failed: {e}"
+
+# ---- Enhanced Agent State ----
+class EnhancedAgentState(TypedDict):
+    messages: Annotated[List[HumanMessage | AIMessage], operator.add]
+    query: str
+    agent_type: str
+    final_answer: str
+    perf: Dict[str, Any]
+    tools_used: List[str]
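
Only `messages` carries a reducer here: `Annotated[List[...], operator.add]` tells LangGraph to concatenate message lists across node updates instead of overwriting them, while every other key is last-write-wins. Illustration with hypothetical values:

    # {"messages": [a]} merged with a node's {"messages": [b]} yields {"messages": [a, b]}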
+
+# ---- Working Multi-LLM System ----
+class WorkingMultiLLMSystem:
+    """Reliable multi-LLM system that actually works"""
 
     def __init__(self):
+        self.tools = [multiply, add, subtract, divide, modulus, web_search, wiki_search]
+        self.graph = self._build_graph()
+        print("✅ Working Multi-LLM System initialized")
+
+    def _get_llm(self, model_name: str = "llama3-70b-8192"):
+        """Get Groq LLM instance"""
+        return ChatGroq(
+            model=model_name,
+            temperature=0,
+            api_key=os.getenv("GROQ_API_KEY")
+        )
+
+    def _build_graph(self) -> StateGraph:
+        """Build the working LangGraph system"""
 
+        def router(st: EnhancedAgentState) -> EnhancedAgentState:
+            """Route queries to appropriate processing"""
+            q = st["query"].lower()
+
+            if any(keyword in q for keyword in ["calculate", "multiply", "add", "subtract", "divide", "math"]):
+                agent_type = "math"
+            elif any(keyword in q for keyword in ["search", "find", "information", "about"]):
+                agent_type = "search"
+            elif any(keyword in q for keyword in ["wikipedia", "wiki"]):
+                agent_type = "wiki"
+            else:
+                agent_type = "general"
+
+            return {**st, "agent_type": agent_type, "tools_used": []}
+
+        def math_node(st: EnhancedAgentState) -> EnhancedAgentState:
+            """Handle mathematical queries"""
+            t0 = time.time()
            try:
+                llm = self._get_llm("llama3-70b-8192")
+
+                enhanced_query = f"""
+                Question: {st["query"]}
+
+                This is a mathematical question. Please solve it step by step and provide the exact numerical answer.
+                """
+
+                sys_msg = SystemMessage(content=SYSTEM_PROMPT)
+                response = llm.invoke([sys_msg, HumanMessage(content=enhanced_query)])
+
+                answer = response.content.strip()
+                if "FINAL ANSWER:" in answer:
+                    answer = answer.split("FINAL ANSWER:")[-1].strip()
+
+                return {**st,
+                        "final_answer": answer,
+                        "perf": {"time": time.time() - t0, "provider": "Groq-Math"}}
            except Exception as e:
+                return {**st, "final_answer": f"Error: {e}", "perf": {"error": str(e)}}
+
+        def search_node(st: EnhancedAgentState) -> EnhancedAgentState:
+            """Handle search queries"""
+            t0 = time.time()
            try:
+                # Perform web search
+                search_results = web_search.invoke({"query": st["query"]})
+
+                llm = self._get_llm("llama3-70b-8192")
+
+                enhanced_query = f"""
+                Question: {st["query"]}
+
+                Search Results:
+                {search_results}
+
+                Based on the search results above, provide a direct answer to the question.
+                """
+
+                sys_msg = SystemMessage(content=SYSTEM_PROMPT)
+                response = llm.invoke([sys_msg, HumanMessage(content=enhanced_query)])
+
+                answer = response.content.strip()
+                if "FINAL ANSWER:" in answer:
+                    answer = answer.split("FINAL ANSWER:")[-1].strip()
+
+                return {**st,
+                        "final_answer": answer,
+                        "tools_used": ["web_search"],
+                        "perf": {"time": time.time() - t0, "provider": "Groq-Search"}}
            except Exception as e:
+                return {**st, "final_answer": f"Error: {e}", "perf": {"error": str(e)}}
+
+        def wiki_node(st: EnhancedAgentState) -> EnhancedAgentState:
+            """Handle Wikipedia queries"""
+            t0 = time.time()
            try:
+                # Perform Wikipedia search
+                wiki_results = wiki_search.invoke({"query": st["query"]})
+
+                llm = self._get_llm("llama3-70b-8192")
+
+                enhanced_query = f"""
+                Question: {st["query"]}
+
+                Wikipedia Results:
+                {wiki_results}
+
+                Based on the Wikipedia information above, provide a direct answer to the question.
+                """
+
+                sys_msg = SystemMessage(content=SYSTEM_PROMPT)
+                response = llm.invoke([sys_msg, HumanMessage(content=enhanced_query)])
+
+                answer = response.content.strip()
+                if "FINAL ANSWER:" in answer:
+                    answer = answer.split("FINAL ANSWER:")[-1].strip()
+
+                return {**st,
+                        "final_answer": answer,
+                        "tools_used": ["wiki_search"],
+                        "perf": {"time": time.time() - t0, "provider": "Groq-Wiki"}}
            except Exception as e:
+                return {**st, "final_answer": f"Error: {e}", "perf": {"error": str(e)}}
 
+        def general_node(st: EnhancedAgentState) -> EnhancedAgentState:
+            """Handle general queries"""
+            t0 = time.time()
+            try:
+                llm = self._get_llm("llama3-70b-8192")
 
+                enhanced_query = f"""
+                Question: {st["query"]}
 
+                Please provide a direct, accurate answer to this question.
+                """
 
+                sys_msg = SystemMessage(content=SYSTEM_PROMPT)
+                response = llm.invoke([sys_msg, HumanMessage(content=enhanced_query)])
 
+                answer = response.content.strip()
+                if "FINAL ANSWER:" in answer:
+                    answer = answer.split("FINAL ANSWER:")[-1].strip()
+
+                return {**st,
+                        "final_answer": answer,
+                        "perf": {"time": time.time() - t0, "provider": "Groq-General"}}
+            except Exception as e:
+                return {**st, "final_answer": f"Error: {e}", "perf": {"error": str(e)}}
+
+        # Build graph
+        g = StateGraph(EnhancedAgentState)
+        g.add_node("router", router)
+        g.add_node("math", math_node)
+        g.add_node("search", search_node)
+        g.add_node("wiki", wiki_node)
+        g.add_node("general", general_node)
 
+        g.set_entry_point("router")
+        g.add_conditional_edges("router", lambda s: s["agent_type"], {
+            "math": "math",
+            "search": "search",
+            "wiki": "wiki",
+            "general": "general"
+        })
 
+        for node in ["math", "search", "wiki", "general"]:
+            g.add_edge(node, END)
+
+        return g.compile(checkpointer=MemorySaver())
+
+    def process_query(self, query: str) -> str:
+        """Process a query through the working system"""
+        state = {
+            "messages": [HumanMessage(content=query)],
+            "query": query,
+            "agent_type": "",
+            "final_answer": "",
+            "perf": {},
+            "tools_used": []
+        }
+        config = {"configurable": {"thread_id": f"working_{hash(query)}"}}
 
        try:
+            result = self.graph.invoke(state, config)
+            answer = result.get("final_answer", "").strip()
 
+            # Validation
+            if not answer or answer == query or len(answer.strip()) == 0:
+                return "Information not available"
 
+            return answer
        except Exception as e:
+            return f"Error processing query: {e}"
 
+# ---- Compatibility Classes ----
 class UnifiedAgnoEnhancedSystem:
+    """Compatibility wrapper for the working system"""
 
     def __init__(self):
+        print("Initializing working system...")
+        self.agno_system = None  # Not using Agno
+        self.working_system = WorkingMultiLLMSystem()
+        self.graph = self.working_system.graph
 
     def process_query(self, query: str) -> str:
+        return self.working_system.process_query(query)
 
     def get_system_info(self) -> Dict[str, Any]:
+        return {
+            "system": "working_multi_llm",
+            "agno_available": False,
+            "total_models": 1,
+            "active_agents": ["math", "search", "wiki", "general"]
+        }
 
+# For backward compatibility
+AgnoEnhancedAgentSystem = WorkingMultiLLMSystem
+AgnoEnhancedModelManager = WorkingMultiLLMSystem
+
+def build_graph(provider: str = "working"):
+    """Build working graph"""
+    system = WorkingMultiLLMSystem()
+    return system.graph
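
A minimal sketch of driving the compiled graph directly, the way an external caller such as app.py might consume `build_graph()`; it assumes the caller supplies the same state shape that `process_query` builds, and the `MemorySaver` checkpointer requires a `thread_id` in the config:

    graph = build_graph()
    state = {
        "messages": [HumanMessage(content="What is 25 multiplied by 17?")],
        "query": "What is 25 multiplied by 17?",
        "agent_type": "",
        "final_answer": "",
        "perf": {},
        "tools_used": [],
    }
    result = graph.invoke(state, {"configurable": {"thread_id": "demo"}})
    print(result["final_answer"])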
 
 if __name__ == "__main__":
+    # Test the working system
+    system = WorkingMultiLLMSystem()
 
     test_questions = [
+        "How many studio albums were published by Mercedes Sosa between 2000 and 2009?",
+        "What is 25 multiplied by 17?",
+        "Who nominated the only Featured Article on English Wikipedia about a dinosaur?"
     ]
 
+    print("Testing Working Multi-LLM System:")
     for i, question in enumerate(test_questions, 1):
         print(f"\nQuestion {i}: {question}")
         answer = system.process_query(question)
         print(f"Answer: {answer}")
|