josondev committed on
Commit e292008 · verified · 1 Parent(s): 1d50ee8

Update veryfinal.py

Files changed (1): veryfinal.py (+70 -176)
veryfinal.py CHANGED
@@ -1,60 +1,43 @@
 import os
 import time
 import random
-from dotenv import load_dotenv
-from typing import List, Dict, Any, TypedDict, Annotated
 import operator
+from typing import List, Dict, Any, TypedDict, Annotated
+from dotenv import load_dotenv
 
 from langchain_core.tools import tool
 from langchain_community.tools.tavily_search import TavilySearchResults
 from langchain_community.document_loaders import WikipediaLoader
-from langchain_community.vectorstores import FAISS
-from langchain.tools.retriever import create_retriever_tool
-from langchain_text_splitters import RecursiveCharacterTextSplitter
-from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
-from langchain_community.embeddings import SentenceTransformerEmbeddings
-
-from langgraph.graph import StateGraph, START, END
+from langgraph.graph import StateGraph, END
 from langgraph.checkpoint.memory import MemorySaver
+from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
+from langchain_groq import ChatGroq
 
-# Load environment variables
-load_dotenv()
+load_dotenv()  # expects GROQ_API_KEY in your .env
 
-# ---- Tool Definitions ----
 @tool
-def multiply(a: int, b: int) -> int:
-    """Multiply two integers and return the product."""
-    return a * b
+def multiply(a: int, b: int) -> int: return a * b
 
 @tool
-def add(a: int, b: int) -> int:
-    """Add two integers and return the sum."""
-    return a + b
+def add(a: int, b: int) -> int: return a + b
 
 @tool
-def subtract(a: int, b: int) -> int:
-    """Subtract the second integer from the first and return the difference."""
-    return a - b
+def subtract(a: int, b: int) -> int: return a - b
 
 @tool
 def divide(a: int, b: int) -> float:
-    """Divide the first integer by the second and return the quotient."""
     if b == 0:
         raise ValueError("Cannot divide by zero.")
     return a / b
 
 @tool
-def modulus(a: int, b: int) -> int:
-    """Return the remainder of the division of the first integer by the second."""
-    return a % b
+def modulus(a: int, b: int) -> int: return a % b
 
 @tool
 def optimized_web_search(query: str) -> str:
-    """Perform an optimized web search using TavilySearchResults and return concatenated document snippets."""
     try:
-        time.sleep(random.uniform(1, 2))
-        search_tool = TavilySearchResults(max_results=2)
-        docs = search_tool.invoke({"query": query})
+        time.sleep(random.uniform(0.7, 1.5))
+        docs = TavilySearchResults(max_results=2).invoke({"query": query})
         return "\n\n---\n\n".join(
             f"<Doc url='{d.get('url','')}'>{d.get('content','')[:500]}</Doc>"
             for d in docs
@@ -64,172 +47,93 @@ def optimized_web_search(query: str) -> str:
 
 @tool
 def optimized_wiki_search(query: str) -> str:
-    """Perform an optimized Wikipedia search and return concatenated document snippets."""
     try:
-        time.sleep(random.uniform(0.5, 1))
+        time.sleep(random.uniform(0.3, 1))
         docs = WikipediaLoader(query=query, load_max_docs=1).load()
         return "\n\n---\n\n".join(
-            f"<Doc src='{d.metadata.get('source', 'Wikipedia')}'>{d.page_content[:800]}</Doc>"
+            f"<Doc src='{d.metadata.get('source','Wikipedia')}'>{d.page_content[:800]}</Doc>"
             for d in docs
         )
     except Exception as e:
         return f"Wikipedia search failed: {e}"
 
-# ---- LLM Integrations with Error Handling ----
-try:
-    from langchain_groq import ChatGroq
-    GROQ_AVAILABLE = True
-except ImportError:
-    GROQ_AVAILABLE = False
-
-import requests
-
-def deepseek_generate(prompt, api_key=None):
-    """Call DeepSeek API directly."""
-    if not api_key:
-        return "DeepSeek API key not provided"
-
-    url = "https://api.deepseek.com/v1/chat/completions"
-    headers = {
-        "Authorization": f"Bearer {api_key}",
-        "Content-Type": "application/json"
-    }
-    data = {
-        "model": "deepseek-chat",
-        "messages": [{"role": "user", "content": prompt}],
-        "stream": False
-    }
-    try:
-        resp = requests.post(url, headers=headers, json=data, timeout=30)
-        resp.raise_for_status()
-        choices = resp.json().get("choices", [])
-        if choices and "message" in choices[0]:
-            return choices[0]["message"].get("content", "")
-        return "No response from DeepSeek"
-    except Exception as e:
-        return f"DeepSeek API error: {e}"
-
-def baidu_ernie_generate(prompt, api_key=None):
-    """Call Baidu ERNIE API."""
-    if not api_key:
-        return "Baidu ERNIE API key not provided"
-
-    # Baidu ERNIE API endpoint (replace with actual endpoint)
-    url = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions"
-    headers = {
-        "Content-Type": "application/json",
-        "Authorization": f"Bearer {api_key}"
-    }
-    data = {
-        "messages": [{"role": "user", "content": prompt}],
-        "temperature": 0.1,
-        "top_p": 0.8
-    }
-    try:
-        resp = requests.post(url, headers=headers, json=data, timeout=30)
-        resp.raise_for_status()
-        result = resp.json().get("result", "")
-        return result if result else "No response from Baidu ERNIE"
-    except Exception as e:
-        return f"Baidu ERNIE API error: {e}"
-
-# ---- Graph State ----
 class EnhancedAgentState(TypedDict):
-    messages: Annotated[List[HumanMessage|AIMessage], operator.add]
+    messages: Annotated[List[HumanMessage | AIMessage], operator.add]
     query: str
     agent_type: str
     final_answer: str
-    perf: Dict[str,Any]
+    perf: Dict[str, Any]
     agno_resp: str
 
 class HybridLangGraphMultiLLMSystem:
-    def __init__(self, provider="groq"):
-        self.provider = provider
+    """
+    Router that picks between Groq-hosted Llama-3 8B, Llama-3 70B (default),
+    and Groq-hosted DeepSeek-Chat according to the query content.
+    """
+    def __init__(self):
        self.tools = [
            multiply, add, subtract, divide, modulus,
            optimized_web_search, optimized_wiki_search
        ]
        self.graph = self._build_graph()
 
+    def _llm(self, model_name: str):
+        return ChatGroq(
+            model=model_name,
+            temperature=0,
+            api_key=os.getenv("GROQ_API_KEY")
+        )
+
     def _build_graph(self):
-        # Initialize Groq LLM with error handling
-        groq_llm = None
-
-        if GROQ_AVAILABLE and os.getenv("GROQ_API_KEY"):
-            try:
-                # Use Groq for multiple model access
-                groq_llm = ChatGroq(
-                    model="llama-3.1-70b-versatile",  # Updated to a current model
-                    temperature=0,
-                    api_key=os.getenv("GROQ_API_KEY")
-                )
-            except Exception as e:
-                print(f"Failed to initialize Groq: {e}")
+        llama8_llm = self._llm("llama3-8b-8192")
+        llama70_llm = self._llm("llama3-70b-8192")
+        deepseek_llm = self._llm("deepseek-chat")
 
         def router(st: EnhancedAgentState) -> EnhancedAgentState:
             q = st["query"].lower()
-            if "groq" in q and groq_llm:
-                t = "groq"
-            elif "deepseek" in q:
+            if "llama-8" in q:
+                t = "llama8"
+            elif "deepseek" in q:
                 t = "deepseek"
-            elif "ernie" in q or "baidu" in q:
-                t = "baidu"
-            else:
-                # Default to first available provider
-                if groq_llm:
-                    t = "groq"
-                elif os.getenv("DEEPSEEK_API_KEY"):
-                    t = "deepseek"
-                else:
-                    t = "baidu"
+            else:
+                t = "llama70"
             return {**st, "agent_type": t}
 
-        def groq_node(st: EnhancedAgentState) -> EnhancedAgentState:
-            if not groq_llm:
-                return {**st, "final_answer": "Groq not available", "perf": {"error": "No Groq LLM"}}
-
+        def llama8_node(st: EnhancedAgentState) -> EnhancedAgentState:
             t0 = time.time()
-            try:
-                sys = SystemMessage(content="You are a helpful AI assistant. Provide accurate and detailed answers. Be concise but thorough.")
-                res = groq_llm.invoke([sys, HumanMessage(content=st["query"])])
-                return {**st, "final_answer": res.content, "perf": {"time": time.time() - t0, "prov": "Groq"}}
-            except Exception as e:
-                return {**st, "final_answer": f"Groq error: {e}", "perf": {"error": str(e)}}
+            sys = SystemMessage(content="You are a helpful AI assistant.")
+            res = llama8_llm.invoke([sys, HumanMessage(content=st["query"])])
+            return {**st,
+                    "final_answer": res.content,
+                    "perf": {"time": time.time() - t0, "prov": "Groq-Llama3-8B"}}
 
-        def deepseek_node(st: EnhancedAgentState) -> EnhancedAgentState:
+        def llama70_node(st: EnhancedAgentState) -> EnhancedAgentState:
             t0 = time.time()
-            try:
-                prompt = f"You are a helpful AI assistant. Provide accurate and detailed answers. Be concise but thorough.\n\nUser question: {st['query']}"
-                resp = deepseek_generate(prompt, api_key=os.getenv("DEEPSEEK_API_KEY"))
-                return {**st, "final_answer": resp, "perf": {"time": time.time() - t0, "prov": "DeepSeek"}}
-            except Exception as e:
-                return {**st, "final_answer": f"DeepSeek error: {e}", "perf": {"error": str(e)}}
+            sys = SystemMessage(content="You are a helpful AI assistant.")
+            res = llama70_llm.invoke([sys, HumanMessage(content=st["query"])])
+            return {**st,
+                    "final_answer": res.content,
+                    "perf": {"time": time.time() - t0, "prov": "Groq-Llama3-70B"}}
 
-        def baidu_node(st: EnhancedAgentState) -> EnhancedAgentState:
+        def deepseek_node(st: EnhancedAgentState) -> EnhancedAgentState:
             t0 = time.time()
-            try:
-                prompt = f"You are a helpful AI assistant. Provide accurate and detailed answers. Be concise but thorough.\n\nUser question: {st['query']}"
-                resp = baidu_ernie_generate(prompt, api_key=os.getenv("BAIDU_API_KEY"))
-                return {**st, "final_answer": resp, "perf": {"time": time.time() - t0, "prov": "Baidu ERNIE"}}
-            except Exception as e:
-                return {**st, "final_answer": f"Baidu ERNIE error: {e}", "perf": {"error": str(e)}}
-
-        def pick(st: EnhancedAgentState) -> str:
-            return st["agent_type"]
+            sys = SystemMessage(content="You are a helpful AI assistant.")
+            res = deepseek_llm.invoke([sys, HumanMessage(content=st["query"])])
+            return {**st,
+                    "final_answer": res.content,
+                    "perf": {"time": time.time() - t0, "prov": "Groq-DeepSeek"}}
 
         g = StateGraph(EnhancedAgentState)
         g.add_node("router", router)
-        g.add_node("groq", groq_node)
+        g.add_node("llama8", llama8_node)
+        g.add_node("llama70", llama70_node)
         g.add_node("deepseek", deepseek_node)
-        g.add_node("baidu", baidu_node)
         g.set_entry_point("router")
-        g.add_conditional_edges("router", pick, {
-            "groq": "groq",
-            "deepseek": "deepseek",
-            "baidu": "baidu"
-        })
-        for n in ["groq", "deepseek", "baidu"]:
-            g.add_edge(n, END)
+        g.add_conditional_edges("router", lambda s: s["agent_type"],
+                                {"llama8": "llama8", "llama70": "llama70", "deepseek": "deepseek"})
+        g.add_edge("llama8", END)
+        g.add_edge("llama70", END)
+        g.add_edge("deepseek", END)
         return g.compile(checkpointer=MemorySaver())
 
     def process_query(self, q: str) -> str:
@@ -242,25 +146,15 @@ class HybridLangGraphMultiLLMSystem:
             "agno_resp": ""
         }
         cfg = {"configurable": {"thread_id": f"hyb_{hash(q)}"}}
-        try:
-            out = self.graph.invoke(state, cfg)
-            raw_answer = out.get("final_answer", "No answer generated")
-
-            # Clean up the answer
-            if isinstance(raw_answer, str):
-                return raw_answer.strip()
-            return str(raw_answer)
-        except Exception as e:
-            return f"Error processing query: {e}"
+        out = self.graph.invoke(state, cfg)
+        return out.get("final_answer", "").strip()
 
-# Function expected by app.py
-def build_graph(provider="groq"):
-    """Build and return the graph for the agent system."""
-    system = HybridLangGraphMultiLLMSystem(provider=provider)
-    return system.graph
+def build_graph(provider: str | None = None):
+    return HybridLangGraphMultiLLMSystem().graph
 
 if __name__ == "__main__":
-    query = "What are the main benefits of using multiple LLM providers?"
-    system = HybridLangGraphMultiLLMSystem()
-    result = system.process_query(query)
-    print("LangGraph Multi-LLM Result:", result)
+    qa_system = HybridLangGraphMultiLLMSystem()
+    # Test each model
+    print(qa_system.process_query("llama-8: What is the capital of France?"))
+    print(qa_system.process_query("llama-70: Tell me about quantum mechanics."))
+    print(qa_system.process_query("deepseek: What is the Riemann Hypothesis?"))
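
For context, a minimal driver for the committed module might look like the sketch below. It is not part of the commit: it assumes the file is importable as veryfinal, that GROQ_API_KEY is set in the environment, and that the Groq model IDs used above are still served.

    # sketch_driver.py -- hypothetical usage of build_graph() from this commit
    from veryfinal import build_graph

    graph = build_graph()

    # The keys mirror EnhancedAgentState; MemorySaver needs a thread_id in the config.
    state = {
        "messages": [],
        "query": "deepseek: Summarize the Riemann Hypothesis in two sentences.",
        "agent_type": "",
        "final_answer": "",
        "perf": {},
        "agno_resp": "",
    }
    cfg = {"configurable": {"thread_id": "demo-thread"}}

    out = graph.invoke(state, cfg)
    print(out["final_answer"])
    print(out["perf"])  # e.g. {"time": ..., "prov": "Groq-DeepSeek"}

Since process_query() wraps exactly this invoke-and-strip pattern, calling build_graph() directly is mainly useful when the caller (the app.py the old code mentioned) wants the full result state, including the perf timings, rather than just the answer string.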