josondev committed
Commit 5092552 · verified · 1 Parent(s): 294850e

Update veryfinal.py

Files changed (1):
  1. veryfinal.py +366 -59

veryfinal.py CHANGED
@@ -1,8 +1,13 @@
+ """
+ Enhanced Multi-LLM Agent System with Question-Answering Capabilities
+ Supports Groq (Llama-3 8B/70B, DeepSeek), Google Gemini, NVIDIA NIM, and Agno-style agents
+ """
+
  import os
  import time
  import random
  import operator
- from typing import List, Dict, Any, TypedDict, Annotated
+ from typing import List, Dict, Any, TypedDict, Annotated, Optional
  from dotenv import load_dotenv

  from langchain_core.tools import tool
@@ -13,43 +18,116 @@ from langgraph.checkpoint.memory import MemorySaver
  from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
  from langchain_groq import ChatGroq

- load_dotenv() # expects GROQ_API_KEY in your .env
+ # Load environment variables
+ load_dotenv()
+
+ # Enhanced system prompt for question-answering tasks
+ ENHANCED_SYSTEM_PROMPT = (
+     "You are a helpful assistant tasked with answering questions using a set of tools. "
+     "You must provide accurate, comprehensive answers based on available information. "
+     "When answering questions, follow these guidelines:\n"
+     "1. Use available tools to gather information when needed\n"
+     "2. Provide precise, factual answers\n"
+     "3. For numbers: don't use commas or units unless specified\n"
+     "4. For strings: don't use articles or abbreviations, write digits in plain text\n"
+     "5. For lists: apply above rules based on element type\n"
+     "6. Always end with 'FINAL ANSWER: [YOUR ANSWER]'\n"
+     "7. Be concise but thorough in your reasoning\n"
+     "8. If you cannot find the answer, state that clearly"
+ )

+ # ---- Tool Definitions with Enhanced Docstrings ----
  @tool
- def multiply(a: int, b: int) -> int:
-     '''multiplies two numbers'''
+ def multiply(a: int, b: int) -> int:
+     """
+     Multiplies two integers and returns the product.
+
+     Args:
+         a (int): First integer
+         b (int): Second integer
+
+     Returns:
+         int: Product of a and b
+     """
      return a * b

  @tool
- def add(a: int, b: int) -> int:
-     '''adds two numbers'''
+ def add(a: int, b: int) -> int:
+     """
+     Adds two integers and returns the sum.
+
+     Args:
+         a (int): First integer
+         b (int): Second integer
+
+     Returns:
+         int: Sum of a and b
+     """
      return a + b

  @tool
- def subtract(a: int, b: int) -> int:
-     '''subtracts two numbers'''
+ def subtract(a: int, b: int) -> int:
+     """
+     Subtracts the second integer from the first and returns the difference.
+
+     Args:
+         a (int): First integer (minuend)
+         b (int): Second integer (subtrahend)
+
+     Returns:
+         int: Difference of a and b
+     """
      return a - b

  @tool
  def divide(a: int, b: int) -> float:
-     '''divides two numbers'''
+     """
+     Divides the first integer by the second and returns the quotient.
+
+     Args:
+         a (int): Dividend
+         b (int): Divisor
+
+     Returns:
+         float: Quotient of a divided by b
+
+     Raises:
+         ValueError: If b is zero
+     """
      if b == 0:
          raise ValueError("Cannot divide by zero.")
      return a / b

  @tool
- def modulus(a: int, b: int) -> int:
-     '''returns the remainder while dividing two numbers'''
+ def modulus(a: int, b: int) -> int:
+     """
+     Returns the remainder when dividing the first integer by the second.
+
+     Args:
+         a (int): Dividend
+         b (int): Divisor
+
+     Returns:
+         int: Remainder of a divided by b
+     """
      return a % b

  @tool
  def optimized_web_search(query: str) -> str:
-     '''searches the web using tavily'''
+     """
+     Performs an optimized web search using TavilySearchResults.
+
+     Args:
+         query (str): Search query string
+
+     Returns:
+         str: Concatenated search results with URLs and content snippets
+     """
      try:
          time.sleep(random.uniform(0.7, 1.5))
-         docs = TavilySearchResults(max_results=2).invoke(query=query)
+         docs = TavilySearchResults(max_results=3).invoke(query=query)
          return "\n\n---\n\n".join(
-             f"<Doc url='{d.get('url','')}'>{d.get('content','')[:500]}</Doc>"
+             f"<Doc url='{d.get('url','')}'>{d.get('content','')[:800]}</Doc>"
              for d in docs
          )
      except Exception as e:
@@ -57,115 +135,344 @@ def optimized_web_search(query: str) -> str:

  @tool
  def optimized_wiki_search(query: str) -> str:
-     '''searches wikipedia'''
+     """
+     Performs an optimized Wikipedia search and returns content snippets.
+
+     Args:
+         query (str): Wikipedia search query
+
+     Returns:
+         str: Wikipedia content with source attribution
+     """
      try:
          time.sleep(random.uniform(0.3, 1))
-         docs = WikipediaLoader(query=query, load_max_docs=1).load()
+         docs = WikipediaLoader(query=query, load_max_docs=2).load()
          return "\n\n---\n\n".join(
-             f"<Doc src='{d.metadata.get('source','Wikipedia')}'>{d.page_content[:800]}</Doc>"
+             f"<Doc src='{d.metadata.get('source','Wikipedia')}'>{d.page_content[:1000]}</Doc>"
              for d in docs
          )
      except Exception as e:
          return f"Wikipedia search failed: {e}"

+ # ---- LLM Provider Integrations ----
+ try:
+     from langchain_nvidia_ai_endpoints import ChatNVIDIA
+     NVIDIA_AVAILABLE = True
+ except ImportError:
+     NVIDIA_AVAILABLE = False
+
+ try:
+     import google.generativeai as genai
+     from langchain_google_genai import ChatGoogleGenerativeAI
+     GOOGLE_AVAILABLE = True
+ except ImportError:
+     GOOGLE_AVAILABLE = False
+
+ # ---- Enhanced Agent State ----
  class EnhancedAgentState(TypedDict):
+     """
+     State structure for the enhanced multi-LLM agent system.
+
+     Attributes:
+         messages: List of conversation messages
+         query: Current query string
+         agent_type: Selected agent/LLM type
+         final_answer: Generated response
+         perf: Performance metrics
+         agno_resp: Agno-style response metadata
+         tools_used: List of tools used in processing
+         reasoning: Step-by-step reasoning process
+     """
      messages: Annotated[List[HumanMessage | AIMessage], operator.add]
      query: str
      agent_type: str
      final_answer: str
      perf: Dict[str, Any]
      agno_resp: str
+     tools_used: List[str]
+     reasoning: str

- class HybridLangGraphMultiLLMSystem:
+ # ---- Enhanced Multi-LLM System ----
+ class EnhancedQuestionAnsweringSystem:
      """
-     Router that picks between Groq-hosted Llama-3 8B, Llama-3 70B (default),
-     and Groq-hosted DeepSeek-Chat according to the query content.
+     Advanced question-answering system that routes queries to appropriate LLM providers
+     and uses tools to gather information for comprehensive answers.
+
+     Features:
+     - Multi-LLM routing (Groq, Google, NVIDIA)
+     - Tool integration for web search and calculations
+     - Structured reasoning and answer formatting
+     - Performance monitoring
      """
+
      def __init__(self):
+         """Initialize the enhanced question-answering system."""
          self.tools = [
              multiply, add, subtract, divide, modulus,
              optimized_web_search, optimized_wiki_search
          ]
          self.graph = self._build_graph()

-     def _llm(self, model_name: str):
+     def _llm(self, model_name: str) -> ChatGroq:
+         """
+         Create a Groq LLM instance.
+
+         Args:
+             model_name (str): Model identifier
+
+         Returns:
+             ChatGroq: Configured Groq LLM instance
+         """
          return ChatGroq(
              model=model_name,
              temperature=0,
              api_key=os.getenv("GROQ_API_KEY")
          )

-     def _build_graph(self):
-         llama8_llm = self._llm("llama3-8b-8192")
+     def _build_graph(self) -> StateGraph:
+         """
+         Build the LangGraph state machine with enhanced question-answering capabilities.
+
+         Returns:
+             StateGraph: Compiled graph with routing logic
+         """
+         # Initialize LLMs
+         llama8_llm = self._llm("llama3-8b-8192")
          llama70_llm = self._llm("llama3-70b-8192")
          deepseek_llm = self._llm("deepseek-chat")

          def router(st: EnhancedAgentState) -> EnhancedAgentState:
+             """
+             Route queries to appropriate LLM based on complexity and content.
+
+             Args:
+                 st (EnhancedAgentState): Current state
+
+             Returns:
+                 EnhancedAgentState: Updated state with agent selection
+             """
              q = st["query"].lower()
-             if "llama-8" in q:
-                 t = "llama8"
-             elif "deepseek" in q:
+
+             # Route based on query characteristics
+             if any(keyword in q for keyword in ["calculate", "compute", "math", "number"]):
+                 t = "llama70" # Use more powerful model for calculations
+             elif any(keyword in q for keyword in ["search", "find", "lookup", "wikipedia"]):
+                 t = "search_enhanced" # Use search-enhanced processing
+             elif "deepseek" in q or any(keyword in q for keyword in ["analyze", "reasoning", "complex"]):
                  t = "deepseek"
-             else:
+             elif len(q.split()) > 20: # Complex queries
                  t = "llama70"
-             return {**st, "agent_type": t}
+             else:
+                 t = "llama8" # Default for simple queries
+
+             return {**st, "agent_type": t, "tools_used": [], "reasoning": ""}

          def llama8_node(st: EnhancedAgentState) -> EnhancedAgentState:
+             """Process query with Llama-3 8B model."""
              t0 = time.time()
-             sys = SystemMessage(content="You are a helpful AI assistant.")
-             res = llama8_llm.invoke([sys, HumanMessage(content=st["query"])])
-             return {**st,
-                     "final_answer": res.content,
-                     "perf": {"time": time.time() - t0, "prov": "Groq-Llama3-8B"}}
+             try:
+                 sys = SystemMessage(content=ENHANCED_SYSTEM_PROMPT)
+                 res = llama8_llm.invoke([sys, HumanMessage(content=st["query"])])
+
+                 reasoning = "Used Llama-3 8B for efficient processing of straightforward query."
+
+                 return {**st,
+                         "final_answer": res.content,
+                         "reasoning": reasoning,
+                         "perf": {"time": time.time() - t0, "prov": "Groq-Llama3-8B"}}
+             except Exception as e:
+                 return {**st, "final_answer": f"Error: {e}", "perf": {"error": str(e)}}

          def llama70_node(st: EnhancedAgentState) -> EnhancedAgentState:
+             """Process query with Llama-3 70B model."""
              t0 = time.time()
-             sys = SystemMessage(content="You are a helpful AI assistant.")
-             res = llama70_llm.invoke([sys, HumanMessage(content=st["query"])])
-             return {**st,
-                     "final_answer": res.content,
-                     "perf": {"time": time.time() - t0, "prov": "Groq-Llama3-70B"}}
+             try:
+                 sys = SystemMessage(content=ENHANCED_SYSTEM_PROMPT)
+                 res = llama70_llm.invoke([sys, HumanMessage(content=st["query"])])
+
+                 reasoning = "Used Llama-3 70B for complex reasoning and detailed analysis."
+
+                 return {**st,
+                         "final_answer": res.content,
+                         "reasoning": reasoning,
+                         "perf": {"time": time.time() - t0, "prov": "Groq-Llama3-70B"}}
+             except Exception as e:
+                 return {**st, "final_answer": f"Error: {e}", "perf": {"error": str(e)}}

          def deepseek_node(st: EnhancedAgentState) -> EnhancedAgentState:
+             """Process query with DeepSeek model."""
              t0 = time.time()
-             sys = SystemMessage(content="You are a helpful AI assistant.")
-             res = deepseek_llm.invoke([sys, HumanMessage(content=st["query"])])
-             return {**st,
-                     "final_answer": res.content,
-                     "perf": {"time": time.time() - t0, "prov": "Groq-DeepSeek"}}
+             try:
+                 sys = SystemMessage(content=ENHANCED_SYSTEM_PROMPT)
+                 res = deepseek_llm.invoke([sys, HumanMessage(content=st["query"])])
+
+                 reasoning = "Used DeepSeek for advanced reasoning and analytical tasks."
+
+                 return {**st,
+                         "final_answer": res.content,
+                         "reasoning": reasoning,
+                         "perf": {"time": time.time() - t0, "prov": "Groq-DeepSeek"}}
+             except Exception as e:
+                 return {**st, "final_answer": f"Error: {e}", "perf": {"error": str(e)}}

+         def search_enhanced_node(st: EnhancedAgentState) -> EnhancedAgentState:
+             """Process query with search enhancement."""
+             t0 = time.time()
+             tools_used = []
+             reasoning_steps = []
+
+             try:
+                 # Determine if we need web search or Wikipedia
+                 query = st["query"]
+                 search_results = ""
+
+                 if any(keyword in query.lower() for keyword in ["wikipedia", "wiki"]):
+                     search_results = optimized_wiki_search.invoke({"query": query})
+                     tools_used.append("wikipedia_search")
+                     reasoning_steps.append("Searched Wikipedia for relevant information")
+                 else:
+                     search_results = optimized_web_search.invoke({"query": query})
+                     tools_used.append("web_search")
+                     reasoning_steps.append("Performed web search for current information")
+
+                 # Enhance query with search results
+                 enhanced_query = f"""
+                 Original Query: {query}
+
+                 Search Results:
+                 {search_results}
+
+                 Based on the search results above, please provide a comprehensive answer to the original query.
+                 """
+
+                 sys = SystemMessage(content=ENHANCED_SYSTEM_PROMPT)
+                 res = llama70_llm.invoke([sys, HumanMessage(content=enhanced_query)])
+
+                 reasoning_steps.append("Used Llama-3 70B to analyze search results and generate comprehensive answer")
+                 reasoning = " -> ".join(reasoning_steps)
+
+                 return {**st,
+                         "final_answer": res.content,
+                         "tools_used": tools_used,
+                         "reasoning": reasoning,
+                         "perf": {"time": time.time() - t0, "prov": "Search-Enhanced-Llama70"}}
+             except Exception as e:
+                 return {**st, "final_answer": f"Error: {e}", "perf": {"error": str(e)}}
+
+         # Build graph
          g = StateGraph(EnhancedAgentState)
          g.add_node("router", router)
          g.add_node("llama8", llama8_node)
          g.add_node("llama70", llama70_node)
          g.add_node("deepseek", deepseek_node)
+         g.add_node("search_enhanced", search_enhanced_node)
+
          g.set_entry_point("router")
-         g.add_conditional_edges("router", lambda s: s["agent_type"],
-             {"llama8": "llama8", "llama70": "llama70", "deepseek": "deepseek"})
-         g.add_edge("llama8", END)
-         g.add_edge("llama70", END)
-         g.add_edge("deepseek", END)
+         g.add_conditional_edges("router", lambda s: s["agent_type"], {
+             "llama8": "llama8",
+             "llama70": "llama70",
+             "deepseek": "deepseek",
+             "search_enhanced": "search_enhanced"
+         })
+
+         for node in ["llama8", "llama70", "deepseek", "search_enhanced"]:
+             g.add_edge(node, END)
+
          return g.compile(checkpointer=MemorySaver())

      def process_query(self, q: str) -> str:
+         """
+         Process a query through the enhanced question-answering system.
+
+         Args:
+             q (str): Input query
+
+         Returns:
+             str: Generated response with proper formatting
+         """
          state = {
              "messages": [HumanMessage(content=q)],
              "query": q,
              "agent_type": "",
              "final_answer": "",
              "perf": {},
-             "agno_resp": ""
+             "agno_resp": "",
+             "tools_used": [],
+             "reasoning": ""
          }
-         cfg = {"configurable": {"thread_id": f"hyb_{hash(q)}"}}
-         out = self.graph.invoke(state, cfg)
-         return out.get("final_answer", "").strip()
+         cfg = {"configurable": {"thread_id": f"qa_{hash(q)}"}}
+
+         try:
+             out = self.graph.invoke(state, cfg)
+             answer = out.get("final_answer", "").strip()
+
+             # Ensure proper formatting
+             if not answer.startswith("FINAL ANSWER:"):
+                 # Extract the actual answer if it's buried in explanation
+                 if "FINAL ANSWER:" in answer:
+                     answer = answer.split("FINAL ANSWER:")[-1].strip()
+                     answer = f"FINAL ANSWER: {answer}"
+                 else:
+                     # Add FINAL ANSWER prefix if missing
+                     answer = f"FINAL ANSWER: {answer}"
+
+             return answer
+         except Exception as e:
+             return f"FINAL ANSWER: Error processing query: {e}"

- def build_graph(provider: str | None = None):
-     return HybridLangGraphMultiLLMSystem().graph
+ def build_graph(provider: str | None = None) -> StateGraph:
+     """
+     Build and return the graph for the enhanced question-answering system.
+
+     Args:
+         provider (str | None): Provider preference (optional)
+
+     Returns:
+         StateGraph: Compiled graph instance
+     """
+     return EnhancedQuestionAnsweringSystem().graph

+ # ---- Main Question-Answering Interface ----
+ class QuestionAnsweringAgent:
+     """
+     Main interface for the question-answering agent system.
+     """
+
+     def __init__(self):
+         """Initialize the question-answering agent."""
+         self.system = EnhancedQuestionAnsweringSystem()
+
+     def answer_question(self, question: str) -> str:
+         """
+         Answer a question using the enhanced multi-LLM system.
+
+         Args:
+             question (str): The question to answer
+
+         Returns:
+             str: Formatted answer with FINAL ANSWER prefix
+         """
+         return self.system.process_query(question)

  if __name__ == "__main__":
-     qa_system = HybridLangGraphMultiLLMSystem()
-     # Test each model
-     print(qa_system.process_query("llama-8: What is the capital of France?"))
-     print(qa_system.process_query("llama-70: Tell me about quantum mechanics."))
-     print(qa_system.process_query("deepseek: What is the Riemann Hypothesis?"))
+     # Initialize the question-answering system
+     qa_agent = QuestionAnsweringAgent()
+
+     # Test with sample questions
+     test_questions = [
+         "How many studio albums were published by Mercedes Sosa between 2000 and 2009?",
+         "What is 25 multiplied by 17?",
+         "Find information about the capital of France on Wikipedia",
+         "What is the population of Tokyo according to recent data?"
+     ]
+
+     print("=" * 80)
+     print("Enhanced Question-Answering Agent System")
+     print("=" * 80)
+
+     for i, question in enumerate(test_questions, 1):
+         print(f"\nQuestion {i}: {question}")
+         print("-" * 60)
+         answer = qa_agent.answer_question(question)
+         print(answer)
+         print()
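For reviewers who want to smoke-test the change, here is a minimal sketch of driving the new interface. It assumes the file is importable as veryfinal and that GROQ_API_KEY and TAVILY_API_KEY are available via .env; the sample questions are illustrative, not part of the commit:

# Hypothetical smoke test for this commit; not part of the diff above.
# Assumes GROQ_API_KEY and TAVILY_API_KEY are set in .env.
from veryfinal import QuestionAnsweringAgent, build_graph

agent = QuestionAnsweringAgent()

# "calculate" matches the router's math keywords, so this goes to Llama-3 70B.
print(agent.answer_question("Calculate 25 multiplied by 17"))

# "find"/"wikipedia" route to search_enhanced_node, which calls
# optimized_wiki_search before answering with Llama-3 70B.
print(agent.answer_question("Find the capital of France on wikipedia"))

# Evaluation harnesses that expect a compiled LangGraph can use build_graph().
graph = build_graph()

Note that every answer should come back prefixed with "FINAL ANSWER:", since process_query() normalizes the model output to that format.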