josondev committed on
Commit ca98093 · verified · 1 Parent(s): 15b6891

Update veryfinal.py

Files changed (1)
  1. veryfinal.py +269 -256
veryfinal.py CHANGED
@@ -1,326 +1,339 @@
  """
- Enhanced Multi-LLM Agent System - CORRECTED VERSION
- Fixes the issue where questions are returned as answers
  """

  import os
  import time
  import random
- import operator
- from typing import List, Dict, Any, TypedDict, Annotated
  from dotenv import load_dotenv

- from langchain_core.tools import tool
- from langchain_community.tools.tavily_search import TavilySearchResults
- from langchain_community.document_loaders import WikipediaLoader
- from langgraph.graph import StateGraph, END
  from langgraph.checkpoint.memory import MemorySaver
- from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
  from langchain_groq import ChatGroq

  load_dotenv()

- # Enhanced system prompt for proper question-answering
- ENHANCED_SYSTEM_PROMPT = (
-     "You are a helpful assistant tasked with answering questions using available tools. "
-     "Follow these guidelines:\n"
-     "1. Read the question carefully and understand what is being asked\n"
-     "2. Use available tools when you need external information\n"
-     "3. Provide accurate, specific answers based on the information you find\n"
-     "4. For numbers: don't use commas or units unless specified\n"
-     "5. For strings: don't use articles or abbreviations, write digits in plain text\n"
-     "6. Always end with 'FINAL ANSWER: [YOUR ANSWER]' where [YOUR ANSWER] is concise\n"
-     "7. Never repeat the question as your answer\n"
-     "8. If you cannot find the answer, state 'Information not available'\n"
- )

- # ---- Tool Definitions ----
  @tool
  def multiply(a: int, b: int) -> int:
-     """Multiply two integers and return the product."""
      return a * b

  @tool
  def add(a: int, b: int) -> int:
-     """Add two integers and return the sum."""
      return a + b

  @tool
  def subtract(a: int, b: int) -> int:
-     """Subtract the second integer from the first and return the difference."""
      return a - b

  @tool
  def divide(a: int, b: int) -> float:
-     """Divide the first integer by the second and return the quotient."""
      if b == 0:
          raise ValueError("Cannot divide by zero.")
      return a / b

  @tool
  def modulus(a: int, b: int) -> int:
-     """Return the remainder when dividing the first integer by the second."""
      return a % b

  @tool
- def optimized_web_search(query: str) -> str:
-     """Perform web search using TavilySearchResults."""
      try:
-         time.sleep(random.uniform(0.7, 1.5))
          search_tool = TavilySearchResults(max_results=3)
-         docs = search_tool.invoke({"query": query})
-         return "\n\n---\n\n".join(
-             f"<Doc url='{d.get('url','')}'>{d.get('content','')[:800]}</Doc>"
-             for d in docs
-         )
      except Exception as e:
          return f"Web search failed: {e}"

  @tool
- def optimized_wiki_search(query: str) -> str:
-     """Perform Wikipedia search and return content."""
      try:
-         time.sleep(random.uniform(0.3, 1))
-         docs = WikipediaLoader(query=query, load_max_docs=2).load()
-         return "\n\n---\n\n".join(
-             f"<Doc src='{d.metadata.get('source','Wikipedia')}'>{d.page_content[:1000]}</Doc>"
-             for d in docs
-         )
      except Exception as e:
-         return f"Wikipedia search failed: {e}"

- # ---- Enhanced Agent State ----
- class EnhancedAgentState(TypedDict):
-     """State structure for the enhanced agent system."""
-     messages: Annotated[List[HumanMessage | AIMessage], operator.add]
-     query: str
-     agent_type: str
-     final_answer: str
-     perf: Dict[str, Any]
-     agno_resp: str

- # ---- Enhanced Multi-LLM System ----
- class HybridLangGraphMultiLLMSystem:
-     """Enhanced question-answering system with proper response handling."""
-
-     def __init__(self):
-         """Initialize the enhanced multi-LLM system."""
-         self.tools = [
-             multiply, add, subtract, divide, modulus,
-             optimized_web_search, optimized_wiki_search
-         ]
-         self.graph = self._build_graph()

-     def _llm(self, model_name: str) -> ChatGroq:
-         """Create a Groq LLM instance."""
-         return ChatGroq(
-             model=model_name,
-             temperature=0,
-             api_key=os.getenv("GROQ_API_KEY")
          )

-     def _build_graph(self) -> StateGraph:
-         """Build the LangGraph state machine with proper response handling."""
-         # Initialize LLMs
-         llama8_llm = self._llm("llama3-8b-8192")
-         llama70_llm = self._llm("llama3-70b-8192")
-         deepseek_llm = self._llm("deepseek-chat")
-
-         def router(st: EnhancedAgentState) -> EnhancedAgentState:
-             """Route queries to appropriate LLM based on content analysis."""
-             q = st["query"].lower()
-
-             # Enhanced routing logic
-             if any(keyword in q for keyword in ["calculate", "compute", "math", "multiply", "add", "subtract", "divide"]):
-                 t = "llama70"  # Use more powerful model for calculations
-             elif any(keyword in q for keyword in ["search", "find", "lookup", "wikipedia", "information about"]):
-                 t = "search_enhanced"  # Use search-enhanced processing
-             elif "deepseek" in q or any(keyword in q for keyword in ["analyze", "reasoning", "complex"]):
-                 t = "deepseek"
-             elif "llama-8" in q:
-                 t = "llama8"
-             elif len(q.split()) > 20:  # Complex queries
-                 t = "llama70"
-             else:
-                 t = "llama8"  # Default for simple queries
-
-             return {**st, "agent_type": t}
-
-         def llama8_node(st: EnhancedAgentState) -> EnhancedAgentState:
-             """Process query with Llama-3 8B model."""
-             t0 = time.time()
-             try:
-                 # Create enhanced prompt with context
-                 enhanced_query = f"""
- Question: {st["query"]}
-
- Please provide a direct, accurate answer to this question. Do not repeat the question.
- """
-                 sys = SystemMessage(content=ENHANCED_SYSTEM_PROMPT)
-                 res = llama8_llm.invoke([sys, HumanMessage(content=enhanced_query)])
-
-                 # Extract and clean the answer
-                 answer = res.content.strip()
-                 if "FINAL ANSWER:" in answer:
-                     answer = answer.split("FINAL ANSWER:")[-1].strip()
-
-                 return {**st,
-                         "final_answer": answer,
-                         "perf": {"time": time.time() - t0, "prov": "Groq-Llama3-8B"}}
-             except Exception as e:
-                 return {**st, "final_answer": f"Error: {e}", "perf": {"error": str(e)}}

-         def llama70_node(st: EnhancedAgentState) -> EnhancedAgentState:
-             """Process query with Llama-3 70B model."""
-             t0 = time.time()
              try:
-                 # Create enhanced prompt with context
-                 enhanced_query = f"""
- Question: {st["query"]}
-
- Please provide a direct, accurate answer to this question. Do not repeat the question.
- """
-                 sys = SystemMessage(content=ENHANCED_SYSTEM_PROMPT)
-                 res = llama70_llm.invoke([sys, HumanMessage(content=enhanced_query)])
-
-                 # Extract and clean the answer
-                 answer = res.content.strip()
-                 if "FINAL ANSWER:" in answer:
-                     answer = answer.split("FINAL ANSWER:")[-1].strip()
-
-                 return {**st,
-                         "final_answer": answer,
-                         "perf": {"time": time.time() - t0, "prov": "Groq-Llama3-70B"}}
              except Exception as e:
-                 return {**st, "final_answer": f"Error: {e}", "perf": {"error": str(e)}}

-         def deepseek_node(st: EnhancedAgentState) -> EnhancedAgentState:
-             """Process query with DeepSeek model."""
-             t0 = time.time()
-             try:
-                 # Create enhanced prompt with context
-                 enhanced_query = f"""
- Question: {st["query"]}
-
- Please provide a direct, accurate answer to this question. Do not repeat the question.
- """
-                 sys = SystemMessage(content=ENHANCED_SYSTEM_PROMPT)
-                 res = deepseek_llm.invoke([sys, HumanMessage(content=enhanced_query)])
-
-                 # Extract and clean the answer
-                 answer = res.content.strip()
-                 if "FINAL ANSWER:" in answer:
-                     answer = answer.split("FINAL ANSWER:")[-1].strip()
-
-                 return {**st,
-                         "final_answer": answer,
-                         "perf": {"time": time.time() - t0, "prov": "Groq-DeepSeek"}}
-             except Exception as e:
-                 return {**st, "final_answer": f"Error: {e}", "perf": {"error": str(e)}}

-         def search_enhanced_node(st: EnhancedAgentState) -> EnhancedAgentState:
-             """Process query with search enhancement."""
-             t0 = time.time()

-             try:
-                 # Determine search strategy
-                 query = st["query"]
-                 search_results = ""
-
-                 if any(keyword in query.lower() for keyword in ["wikipedia", "wiki"]):
-                     search_results = optimized_wiki_search.invoke({"query": query})
                  else:
-                     search_results = optimized_web_search.invoke({"query": query})
-
-                 # Create comprehensive prompt with search results
-                 enhanced_query = f"""
- Original Question: {query}
-
- Search Results:
- {search_results}
-
- Based on the search results above, provide a direct answer to the original question.
- Extract the specific information requested. Do not repeat the question.
- """
-                 sys = SystemMessage(content=ENHANCED_SYSTEM_PROMPT)
-                 res = llama70_llm.invoke([sys, HumanMessage(content=enhanced_query)])
-
-                 # Extract and clean the answer
-                 answer = res.content.strip()
-                 if "FINAL ANSWER:" in answer:
-                     answer = answer.split("FINAL ANSWER:")[-1].strip()
-
-                 return {**st,
-                         "final_answer": answer,
-                         "perf": {"time": time.time() - t0, "prov": "Search-Enhanced-Llama70"}}
-             except Exception as e:
-                 return {**st, "final_answer": f"Error: {e}", "perf": {"error": str(e)}}
-
263
- # Build graph
264
- g = StateGraph(EnhancedAgentState)
265
- g.add_node("router", router)
266
- g.add_node("llama8", llama8_node)
267
- g.add_node("llama70", llama70_node)
268
- g.add_node("deepseek", deepseek_node)
269
- g.add_node("search_enhanced", search_enhanced_node)
270
-
271
- g.set_entry_point("router")
272
- g.add_conditional_edges("router", lambda s: s["agent_type"], {
273
- "llama8": "llama8",
274
- "llama70": "llama70",
275
- "deepseek": "deepseek",
276
- "search_enhanced": "search_enhanced"
277
- })
278
 
279
- for node in ["llama8", "llama70", "deepseek", "search_enhanced"]:
280
- g.add_edge(node, END)
281
-
282
- return g.compile(checkpointer=MemorySaver())
283
 
284
- def process_query(self, q: str) -> str:
285
- """Process a query and return the final answer."""
286
- state = {
287
- "messages": [HumanMessage(content=q)],
288
- "query": q,
289
- "agent_type": "",
290
- "final_answer": "",
291
- "perf": {},
292
- "agno_resp": ""
 
 
 
 
 
 
 
 
 
293
  }
294
- cfg = {"configurable": {"thread_id": f"qa_{hash(q)}"}}
295
-
296
- try:
297
- out = self.graph.invoke(state, cfg)
298
- answer = out.get("final_answer", "").strip()
299
-
300
- # Ensure we don't return the question as the answer
301
- if answer == q or answer.startswith(q):
302
- return "Information not available"
303
-
304
- return answer if answer else "No answer generated"
305
- except Exception as e:
306
- return f"Error processing query: {e}"
307
 
308
- def build_graph(provider: str | None = None) -> StateGraph:
-     """Build and return the graph for the enhanced agent system."""
-     return HybridLangGraphMultiLLMSystem().graph

- if __name__ == "__main__":
-     # Test the system
-     qa_system = HybridLangGraphMultiLLMSystem()

      test_questions = [
          "What is 25 multiplied by 17?",
-         "Who was the first president of the United States?",
-         "Find information about artificial intelligence on Wikipedia"
      ]

      for question in test_questions:
-         print(f"Question: {question}")
-         answer = qa_system.process_query(question)
-         print(f"Answer: {answer}")
-         print("-" * 50)

  """
2
+ Enhanced LangGraph Agent with Multi-LLM Support and Proper Question Answering
3
+ Combines your original LangGraph structure with enhanced response handling
4
  """
5
 
6
  import os
7
  import time
8
  import random
 
 
9
  from dotenv import load_dotenv
10
+ from typing import List, Dict, Any, TypedDict, Annotated
11
+ import operator
12
 
13
+ from langgraph.graph import START, StateGraph, MessagesState, END
14
+ from langgraph.prebuilt import tools_condition, ToolNode
 
 
15
  from langgraph.checkpoint.memory import MemorySaver
16
+
17
+ from langchain_google_genai import ChatGoogleGenerativeAI
18
  from langchain_groq import ChatGroq
19
+ from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint, HuggingFaceEmbeddings
20
+ from langchain_community.tools.tavily_search import TavilySearchResults
21
+ from langchain_community.document_loaders import WikipediaLoader, ArxivLoader
22
+ from langchain_community.vectorstores import SupabaseVectorStore
23
+ from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
24
+ from langchain_core.tools import tool
25
+ from langchain.tools.retriever import create_retriever_tool
26
+ from supabase.client import Client, create_client
27
 
28
  load_dotenv()
29
 
30
+ # Enhanced system prompt for better question answering
31
+ ENHANCED_SYSTEM_PROMPT = """You are a helpful assistant tasked with answering questions using a set of tools.
 
 
 
 
 
 
 
 
 
 
 
32
 
33
+ CRITICAL INSTRUCTIONS:
34
+ 1. Read the question carefully and understand what specific information is being asked
35
+ 2. Use the appropriate tools to find the exact information requested
36
+ 3. For factual questions, search for current and accurate information
37
+ 4. For calculations, use the math tools provided
38
+ 5. Always provide specific, direct answers - never repeat the question as your answer
39
+ 6. If you cannot find the information, state "Information not available"
40
+ 7. Format your final response as: FINAL ANSWER: [your specific answer]
41
+
42
+ ANSWER FORMAT RULES:
43
+ - For numbers: provide just the number without commas or units unless specified
44
+ - For names/strings: provide the exact name or term without articles
45
+ - For lists: provide comma-separated values
46
+ - Be concise and specific in your final answer
47
+
48
+ Remember: Your job is to ANSWER the question, not repeat it back."""
49
+
50
+ # ---- Enhanced Tool Definitions ----
  @tool
  def multiply(a: int, b: int) -> int:
+     """Multiply two numbers.
+     Args:
+         a: first int
+         b: second int
+     """
      return a * b

  @tool
  def add(a: int, b: int) -> int:
+     """Add two numbers.
+     Args:
+         a: first int
+         b: second int
+     """
      return a + b

  @tool
  def subtract(a: int, b: int) -> int:
+     """Subtract two numbers.
+     Args:
+         a: first int
+         b: second int
+     """
      return a - b

  @tool
  def divide(a: int, b: int) -> float:
+     """Divide two numbers.
+     Args:
+         a: first int
+         b: second int
+     """
      if b == 0:
          raise ValueError("Cannot divide by zero.")
      return a / b

  @tool
  def modulus(a: int, b: int) -> int:
+     """Get the modulus of two numbers.
+     Args:
+         a: first int
+         b: second int
+     """
      return a % b

  @tool
+ def wiki_search(query: str) -> str:
+     """Search Wikipedia for a query and return maximum 2 results.
+     Args:
+         query: The search query.
+     """
      try:
+         time.sleep(random.uniform(0.5, 1.0))  # Rate limiting
+         search_docs = WikipediaLoader(query=query, load_max_docs=2).load()
+         if not search_docs:
+             return "No Wikipedia results found"
+
+         formatted_search_docs = "\n\n---\n\n".join([
+             f'<Document source="{doc.metadata.get("source", "Wikipedia")}" title="{doc.metadata.get("title", "")}">\n{doc.page_content[:1500]}\n</Document>'
+             for doc in search_docs
+         ])
+         return formatted_search_docs
+     except Exception as e:
+         return f"Wikipedia search failed: {e}"
+
+ @tool
+ def web_search(query: str) -> str:
+     """Search Tavily for a query and return maximum 3 results.
+     Args:
+         query: The search query.
+     """
+     try:
+         time.sleep(random.uniform(0.7, 1.2))  # Rate limiting
          search_tool = TavilySearchResults(max_results=3)
+         search_docs = search_tool.invoke({"query": query})
+         if not search_docs:
+             return "No web search results found"
+
+         formatted_search_docs = "\n\n---\n\n".join([
+             f'<Document source="{doc.get("url", "")}">\n{doc.get("content", "")[:1200]}\n</Document>'
+             for doc in search_docs
+         ])
+         return formatted_search_docs
      except Exception as e:
          return f"Web search failed: {e}"

  @tool
+ def arxiv_search(query: str) -> str:
+     """Search Arxiv for a query and return maximum 3 results.
+     Args:
+         query: The search query.
+     """
      try:
+         time.sleep(random.uniform(0.5, 1.0))  # Rate limiting
+         search_docs = ArxivLoader(query=query, load_max_docs=3).load()
+         if not search_docs:
+             return "No ArXiv results found"
+
+         formatted_search_docs = "\n\n---\n\n".join([
+             f'<Document source="{doc.metadata.get("source", "ArXiv")}" title="{doc.metadata.get("title", "")}">\n{doc.page_content[:1000]}\n</Document>'
+             for doc in search_docs
+         ])
+         return formatted_search_docs
      except Exception as e:
+         return f"ArXiv search failed: {e}"

+ # Initialize tools list
+ tools = [
+     multiply, add, subtract, divide, modulus,
+     wiki_search, web_search, arxiv_search
+ ]

+ # Enhanced State for better tracking
+ class EnhancedState(MessagesState):
+     """Enhanced state with additional tracking"""
+     query: str = ""
+     tools_used: List[str] = []
+     search_results: str = ""

+ def build_graph(provider: str = "groq"):
+     """Build the enhanced graph with proper error handling and response formatting"""
+
+     # Initialize LLM based on provider
+     if provider == "google":
+         llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)
+     elif provider == "groq":
+         llm = ChatGroq(model="llama3-70b-8192", temperature=0)  # Using more reliable model
+     elif provider == "huggingface":
+         llm = ChatHuggingFace(
+             llm=HuggingFaceEndpoint(
+                 url="https://api-inference.huggingface.co/models/Meta-DeepLearning/llama-2-7b-chat-hf",
+                 temperature=0,
+             ),
          )
+     else:
+         raise ValueError("Invalid provider. Choose 'google', 'groq' or 'huggingface'.")
+
+     # Bind tools to LLM
+     llm_with_tools = llm.bind_tools(tools)

+     # Initialize vector store if available
+     vector_store = None
+     try:
+         if os.getenv("SUPABASE_URL") and os.getenv("SUPABASE_SERVICE_KEY"):
+             embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
+             supabase: Client = create_client(
+                 os.environ.get("SUPABASE_URL"),
+                 os.environ.get("SUPABASE_SERVICE_KEY")
+             )
+             vector_store = SupabaseVectorStore(
+                 client=supabase,
+                 embedding=embeddings,
+                 table_name="documents",
+                 query_name="match_documents_langchain",
+             )
+     except Exception as e:
+         print(f"Vector store initialization failed: {e}")

+     def retriever(state: MessagesState):
+         """Enhanced retriever node with fallback"""
+         messages = state["messages"]
+         query = messages[-1].content if messages else ""
+
+         # Try to get similar questions from vector store
+         similar_context = ""
+         if vector_store:
              try:
+                 similar_questions = vector_store.similarity_search(query, k=1)
+                 if similar_questions:
+                     similar_context = f"\n\nSimilar example for reference:\n{similar_questions[0].page_content}"
              except Exception as e:
+                 print(f"Vector search failed: {e}")
+
+         # Enhanced system message with context
+         enhanced_prompt = ENHANCED_SYSTEM_PROMPT + similar_context
+         sys_msg = SystemMessage(content=enhanced_prompt)
+
+         return {"messages": [sys_msg] + messages}

+     def assistant(state: MessagesState):
+         """Enhanced assistant node with better response handling"""
+         try:
+             response = llm_with_tools.invoke(state["messages"])
+
+             # Ensure response is properly formatted
+             if hasattr(response, 'content'):
+                 content = response.content
+
+                 # Check if this is just repeating the question
+                 original_query = state["messages"][-1].content if state["messages"] else ""
+                 if content.strip() == original_query.strip():
+                     # Force a better response
+                     enhanced_messages = state["messages"] + [
+                         HumanMessage(content=f"Please provide a specific answer to this question, do not repeat the question: {original_query}")
+                     ]
+                     response = llm_with_tools.invoke(enhanced_messages)
+
+             return {"messages": [response]}
+         except Exception as e:
+             error_response = AIMessage(content=f"Error processing request: {e}")
+             return {"messages": [error_response]}

+     def format_final_answer(state: MessagesState):
+         """Format the final answer properly"""
+         messages = state["messages"]
+         if not messages:
+             return {"messages": [AIMessage(content="FINAL ANSWER: Information not available")]}
+
+         last_message = messages[-1]
+         if hasattr(last_message, 'content'):
+             content = last_message.content
+
+             # Ensure proper formatting
+             if "FINAL ANSWER:" not in content:
+                 # Extract the key information and format it
+                 if content.strip():
+                     formatted_content = f"FINAL ANSWER: {content.strip()}"
                  else:
+                     formatted_content = "FINAL ANSWER: Information not available"
+
+                 formatted_message = AIMessage(content=formatted_content)
+                 return {"messages": messages[:-1] + [formatted_message]}
+
+         return {"messages": messages}

+     # Build the graph
+     builder = StateGraph(MessagesState)
+
+     # Add nodes
+     builder.add_node("retriever", retriever)
+     builder.add_node("assistant", assistant)
+     builder.add_node("tools", ToolNode(tools))
+     builder.add_node("formatter", format_final_answer)
+
+     # Add edges
+     builder.add_edge(START, "retriever")
+     builder.add_edge("retriever", "assistant")
+     builder.add_conditional_edges(
+         "assistant",
+         tools_condition,
+         {
+             "tools": "tools",
+             "__end__": "formatter"
          }
+     )
+     builder.add_edge("tools", "assistant")
+     builder.add_edge("formatter", END)
+
+     # Compile graph with checkpointer
+     return builder.compile(checkpointer=MemorySaver())

+ # Test function
+ def test_agent():
+     """Test the agent with sample questions"""
+     graph = build_graph(provider="groq")

      test_questions = [
+         "How many studio albums were published by Mercedes Sosa between 2000 and 2009?",
          "What is 25 multiplied by 17?",
+         "Who nominated the only Featured Article on English Wikipedia about a dinosaur that was promoted in November 2004?"
      ]

      for question in test_questions:
+         print(f"\nQuestion: {question}")
+         print("-" * 60)
+
+         try:
+             messages = [HumanMessage(content=question)]
+             config = {"configurable": {"thread_id": f"test_{hash(question)}"}}
+             result = graph.invoke({"messages": messages}, config)
+
+             if result and "messages" in result:
+                 final_message = result["messages"][-1]
+                 if hasattr(final_message, 'content'):
+                     print(f"Answer: {final_message.content}")
+                 else:
+                     print(f"Answer: {final_message}")
+             else:
+                 print("Answer: No response generated")
+         except Exception as e:
+             print(f"Error: {e}")
+
+         print()
+
+ if __name__ == "__main__":
+     # Run tests
+     test_agent()
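
Usage note (not part of the commit): because the new formatter node always prefixes successful responses with "FINAL ANSWER:", a caller that wants only the answer text can strip that marker after invoking the compiled graph. The sketch below is a minimal, hypothetical example built on the committed build_graph(); the extract_answer helper is not defined in the commit, and it assumes GROQ_API_KEY (plus TAVILY_API_KEY for web search) are set in the environment.

    # Hypothetical usage sketch, assuming the commit's veryfinal.py is importable.
    from langchain_core.messages import HumanMessage
    from veryfinal import build_graph

    def extract_answer(raw: str) -> str:
        # format_final_answer adds a "FINAL ANSWER:" prefix on success;
        # fall back to the raw text if the marker is somehow missing.
        marker = "FINAL ANSWER:"
        return raw.split(marker, 1)[1].strip() if marker in raw else raw.strip()

    graph = build_graph(provider="groq")
    # The MemorySaver checkpointer requires a thread_id in the config.
    config = {"configurable": {"thread_id": "demo-1"}}
    result = graph.invoke(
        {"messages": [HumanMessage(content="What is 25 multiplied by 17?")]},
        config,
    )
    print(extract_answer(result["messages"][-1].content))  # expected: 425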