josondev committed
Commit a35ea13 · verified · 1 Parent(s): decae1d

Update veryfinal.py

Files changed (1): veryfinal.py (+161 −60)
veryfinal.py CHANGED
@@ -8,7 +8,7 @@ import operator
from langchain_core.tools import tool
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_community.document_loaders import WikipediaLoader
- from langchain_community.vectorstores import Chroma
+ from langchain_community.vectorstores import FAISS
from langchain.tools.retriever import create_retriever_tool
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
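
The Chroma → FAISS swap above pairs with the `create_retriever_tool` and text-splitter imports already in the file, though none of the retriever wiring is visible in this diff. A minimal sketch of how these pieces typically combine, assuming `faiss-cpu` and `sentence-transformers` are installed (all names below are illustrative, not taken from the commit):

```python
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import SentenceTransformerEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain.tools.retriever import create_retriever_tool

# Illustrative wiring only: split some text, index it in FAISS,
# and expose the retriever as a LangChain tool.
emb = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
chunks = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50).split_text(
    "LangGraph routes queries across Groq, NVIDIA, Gemini, DeepSeek, and ERNIE providers."
)
store = FAISS.from_texts(chunks, emb)
retriever_tool = create_retriever_tool(
    store.as_retriever(search_kwargs={"k": 2}),
    name="docs_search",
    description="Search the indexed reference snippets.",
)
print(retriever_tool.invoke({"query": "Which providers are routed?"}))
```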
@@ -17,6 +17,9 @@ from langchain_community.embeddings import SentenceTransformerEmbeddings
from langgraph.graph import StateGraph, START, END
from langgraph.checkpoint.memory import MemorySaver

+ # Load environment variables
+ load_dotenv()
+
# ---- Tool Definitions ----
@tool
def multiply(a: int, b: int) -> int:
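
Calling `load_dotenv()` at import time makes the provider keys referenced later in the file visible to `os.getenv()`. A small sketch of what that buys, using only key names that appear in this commit:

```python
import os
from dotenv import load_dotenv

# load_dotenv() copies key=value pairs from a local .env file into the
# process environment, so os.getenv() can see every provider key the
# module reads later.
load_dotenv()
for key in ("GROQ_API_KEY", "NVIDIA_API_KEY", "GEMINI_API_KEY",
            "DEEPSEEK_API_KEY", "BAIDU_API_KEY"):
    print(key, "->", "set" if os.getenv(key) else "missing")
```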
@@ -50,7 +53,8 @@ def optimized_web_search(query: str) -> str:
    """Perform an optimized web search using TavilySearchResults and return concatenated document snippets."""
    try:
        time.sleep(random.uniform(1, 2))
-         docs = TavilySearchResults(max_results=2).invoke(query=query)
+         search_tool = TavilySearchResults(max_results=2)
+         docs = search_tool.invoke({"query": query})
        return "\n\n---\n\n".join(
            f"<Doc url='{d.get('url','')}'>{d.get('content','')[:500]}</Doc>"
            for d in docs
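
The rewritten call matters because a LangChain tool's `invoke` takes the tool input as its first positional argument, either a plain string or a dict keyed by the input schema; passing `query=` as a bare keyword, as the deleted line did, raises a `TypeError`. A short sketch of the corrected pattern, assuming `TAVILY_API_KEY` is set in the environment:

```python
from langchain_community.tools.tavily_search import TavilySearchResults

# max_results kept at 2, matching the commit.
search_tool = TavilySearchResults(max_results=2)

# The tool input goes in as one positional value: a dict keyed by the
# tool's input schema ("query"), or simply a raw string.
docs = search_tool.invoke({"query": "LangGraph multi-LLM routing"})
for d in docs:
    print(d.get("url", ""), "-", d.get("content", "")[:100])
```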
@@ -65,52 +69,70 @@ def optimized_wiki_search(query: str) -> str:
        time.sleep(random.uniform(0.5, 1))
        docs = WikipediaLoader(query=query, load_max_docs=1).load()
        return "\n\n---\n\n".join(
-             f"<Doc src='{d.metadata['source']}'>{d.page_content[:800]}</Doc>"
+             f"<Doc src='{d.metadata.get('source', 'Wikipedia')}'>{d.page_content[:800]}</Doc>"
            for d in docs
        )
    except Exception as e:
        return f"Wikipedia search failed: {e}"

- # ---- LLM Integrations ----
- load_dotenv()
-
- # Groq (Llama 3, DeepSeek, etc. via LangChain integration)
- from langchain_groq import ChatGroq
-
- # NVIDIA NIM (LangChain integration)
- from langchain_nvidia_ai_endpoints import ChatNVIDIA
-
- from google import genai
-
- # DeepSeek (via Ollama or API)
+ # ---- LLM Integrations with Error Handling ----
+ try:
+     from langchain_groq import ChatGroq
+     GROQ_AVAILABLE = True
+ except ImportError:
+     GROQ_AVAILABLE = False
+
+ try:
+     from langchain_nvidia_ai_endpoints import ChatNVIDIA
+     NVIDIA_AVAILABLE = True
+ except ImportError:
+     NVIDIA_AVAILABLE = False
+
+ try:
+     import google.generativeai as genai
+     GEMINI_AVAILABLE = True
+ except ImportError:
+     GEMINI_AVAILABLE = False
+
import requests

- # Baidu ERNIE (assume open source API, use requests as placeholder)
- def baidu_ernie_generate(prompt, api_key=None):
-     """Call Baidu ERNIE open source API (pseudo-code, replace with actual endpoint and params)."""
-     # Example endpoint and payload for demonstration purposes only:
-     url = "https://api.baidu.com/ernie/v1/generate"
-     headers = {"Authorization": f"Bearer {api_key}"}
-     data = {"model": "ernie-4.5", "prompt": prompt}
-     try:
-         resp = requests.post(url, headers=headers, json=data, timeout=30)
-         return resp.json().get("result", "")
-     except Exception as e:
-         return f"ERNIE API error: {e}"
-
def deepseek_generate(prompt, api_key=None):
-     """Call DeepSeek open source API (pseudo-code, replace with actual endpoint and params)."""
+     """Call DeepSeek API."""
+     if not api_key:
+         return "DeepSeek API key not provided"
+
    url = "https://api.deepseek.com/v1/chat/completions"
-     headers = {"Authorization": f"Bearer {api_key}"}
-     data = {"model": "deepseek-chat", "messages": [{"role": "user", "content": prompt}]}
+     headers = {
+         "Authorization": f"Bearer {api_key}",
+         "Content-Type": "application/json"
+     }
+     data = {
+         "model": "deepseek-chat",
+         "messages": [{"role": "user", "content": prompt}],
+         "stream": False
+     }
    try:
        resp = requests.post(url, headers=headers, json=data, timeout=30)
-         return resp.json().get("choices", [{}])[0].get("message", {}).get("content", "")
+         resp.raise_for_status()
+         choices = resp.json().get("choices", [])
+         if choices and "message" in choices[0]:
+             return choices[0]["message"].get("content", "")
+         return "No response from DeepSeek"
    except Exception as e:
        return f"DeepSeek API error: {e}"

+ def baidu_ernie_generate(prompt, api_key=None):
+     """Call Baidu ERNIE API (placeholder implementation)."""
+     if not api_key:
+         return "Baidu ERNIE API key not provided"
+
+     # Note: This is a placeholder. Replace with actual Baidu ERNIE API endpoint
+     try:
+         return f"Baidu ERNIE response for: {prompt[:50]}..."
+     except Exception as e:
+         return f"ERNIE API error: {e}"

- # ---- Graph State and System ----
+ # ---- Graph State ----
class EnhancedAgentState(TypedDict):
    messages: Annotated[List[HumanMessage|AIMessage], operator.add]
    query: str
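
Since `deepseek_generate` now swallows its own failures and returns explanatory strings, callers never need a try/except of their own. A quick usage sketch (the import path assumes the module is importable as `veryfinal`):

```python
import os
from veryfinal import deepseek_generate

# The helper never raises: it returns the model's reply on success and an
# explanatory string ("DeepSeek API key not provided", "DeepSeek API
# error: ...") on failure, which keeps the graph nodes simple.
answer = deepseek_generate(
    "Summarize LangGraph in one sentence.",
    api_key=os.getenv("DEEPSEEK_API_KEY"),
)
print(answer)
```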
@@ -120,7 +142,8 @@ class EnhancedAgentState(TypedDict):
    agno_resp: str

class HybridLangGraphMultiLLMSystem:
-     def __init__(self):
+     def __init__(self, provider="groq"):
+         self.provider = provider
        self.tools = [
            multiply, add, subtract, divide, modulus,
            optimized_web_search, optimized_wiki_search
@@ -128,47 +151,110 @@ class HybridLangGraphMultiLLMSystem:
        self.graph = self._build_graph()

    def _build_graph(self):
-         groq_llm = ChatGroq(model="llama3-70b-8192", temperature=0, api_key=os.getenv("GROQ_API_KEY"))
-         nvidia_llm = ChatNVIDIA(model="meta/llama3-70b-instruct", temperature=0, api_key=os.getenv("NVIDIA_API_KEY"))
+         # Initialize LLMs with error handling
+         groq_llm = None
+         nvidia_llm = None
+
+         if GROQ_AVAILABLE and os.getenv("GROQ_API_KEY"):
+             try:
+                 groq_llm = ChatGroq(
+                     model="llama3-70b-8192",
+                     temperature=0,
+                     api_key=os.getenv("GROQ_API_KEY")
+                 )
+             except Exception as e:
+                 print(f"Failed to initialize Groq: {e}")
+
+         if NVIDIA_AVAILABLE and os.getenv("NVIDIA_API_KEY"):
+             try:
+                 nvidia_llm = ChatNVIDIA(
+                     model="meta/llama3-70b-instruct",
+                     temperature=0,
+                     api_key=os.getenv("NVIDIA_API_KEY")
+                 )
+             except Exception as e:
+                 print(f"Failed to initialize NVIDIA: {e}")

        def router(st: EnhancedAgentState) -> EnhancedAgentState:
            q = st["query"].lower()
-             if "groq" in q: t = "groq"
-             elif "nvidia" in q: t = "nvidia"
-             elif "gemini" in q or "google" in q: t = "gemini"
-             elif "deepseek" in q: t = "deepseek"
-             elif "ernie" in q or "baidu" in q: t = "baidu"
-             else: t = "groq"  # default
+             if "groq" in q and groq_llm:
+                 t = "groq"
+             elif "nvidia" in q and nvidia_llm:
+                 t = "nvidia"
+             elif ("gemini" in q or "google" in q) and GEMINI_AVAILABLE:
+                 t = "gemini"
+             elif "deepseek" in q:
+                 t = "deepseek"
+             elif "ernie" in q or "baidu" in q:
+                 t = "baidu"
+             else:
+                 # Default to first available provider
+                 if groq_llm:
+                     t = "groq"
+                 elif nvidia_llm:
+                     t = "nvidia"
+                 elif GEMINI_AVAILABLE:
+                     t = "gemini"
+                 else:
+                     t = "deepseek"
            return {**st, "agent_type": t}

        def groq_node(st: EnhancedAgentState) -> EnhancedAgentState:
+             if not groq_llm:
+                 return {**st, "final_answer": "Groq not available", "perf": {"error": "No Groq LLM"}}
+
            t0 = time.time()
-             sys = SystemMessage(content="Answer as an expert.")
-             res = groq_llm.invoke([sys, HumanMessage(content=st["query"])])
-             return {**st, "final_answer": res.content, "perf": {"time": time.time() - t0, "prov": "Groq"}}
+             try:
+                 sys = SystemMessage(content="You are a helpful AI assistant. Provide accurate and detailed answers.")
+                 res = groq_llm.invoke([sys, HumanMessage(content=st["query"])])
+                 return {**st, "final_answer": res.content, "perf": {"time": time.time() - t0, "prov": "Groq"}}
+             except Exception as e:
+                 return {**st, "final_answer": f"Groq error: {e}", "perf": {"error": str(e)}}

        def nvidia_node(st: EnhancedAgentState) -> EnhancedAgentState:
+             if not nvidia_llm:
+                 return {**st, "final_answer": "NVIDIA not available", "perf": {"error": "No NVIDIA LLM"}}
+
            t0 = time.time()
-             sys = SystemMessage(content="Answer as an expert.")
-             res = nvidia_llm.invoke([sys, HumanMessage(content=st["query"])])
-             return {**st, "final_answer": res.content, "perf": {"time": time.time() - t0, "prov": "NVIDIA"}}
+             try:
+                 sys = SystemMessage(content="You are a helpful AI assistant. Provide accurate and detailed answers.")
+                 res = nvidia_llm.invoke([sys, HumanMessage(content=st["query"])])
+                 return {**st, "final_answer": res.content, "perf": {"time": time.time() - t0, "prov": "NVIDIA"}}
+             except Exception as e:
+                 return {**st, "final_answer": f"NVIDIA error: {e}", "perf": {"error": str(e)}}

        def gemini_node(st: EnhancedAgentState) -> EnhancedAgentState:
+             if not GEMINI_AVAILABLE:
+                 return {**st, "final_answer": "Gemini not available", "perf": {"error": "Gemini not installed"}}
+
            t0 = time.time()
-             genai.configure(api_key=os.getenv("GEMINI_API_KEY"))
-             model = genai.GenerativeModel("gemini-1.5-pro-latest")
-             res = model.generate_content(st["query"])
-             return {**st, "final_answer": res.text, "perf": {"time": time.time() - t0, "prov": "Gemini"}}
+             try:
+                 api_key = os.getenv("GEMINI_API_KEY")
+                 if not api_key:
+                     return {**st, "final_answer": "Gemini API key not provided", "perf": {"error": "No API key"}}
+
+                 genai.configure(api_key=api_key)
+                 model = genai.GenerativeModel("gemini-1.5-pro-latest")
+                 res = model.generate_content(st["query"])
+                 return {**st, "final_answer": res.text, "perf": {"time": time.time() - t0, "prov": "Gemini"}}
+             except Exception as e:
+                 return {**st, "final_answer": f"Gemini error: {e}", "perf": {"error": str(e)}}

        def deepseek_node(st: EnhancedAgentState) -> EnhancedAgentState:
            t0 = time.time()
-             resp = deepseek_generate(st["query"], api_key=os.getenv("DEEPSEEK_API_KEY"))
-             return {**st, "final_answer": resp, "perf": {"time": time.time() - t0, "prov": "DeepSeek"}}
+             try:
+                 resp = deepseek_generate(st["query"], api_key=os.getenv("DEEPSEEK_API_KEY"))
+                 return {**st, "final_answer": resp, "perf": {"time": time.time() - t0, "prov": "DeepSeek"}}
+             except Exception as e:
+                 return {**st, "final_answer": f"DeepSeek error: {e}", "perf": {"error": str(e)}}

        def baidu_node(st: EnhancedAgentState) -> EnhancedAgentState:
            t0 = time.time()
-             resp = baidu_ernie_generate(st["query"], api_key=os.getenv("BAIDU_API_KEY"))
-             return {**st, "final_answer": resp, "perf": {"time": time.time() - t0, "prov": "ERNIE"}}
+             try:
+                 resp = baidu_ernie_generate(st["query"], api_key=os.getenv("BAIDU_API_KEY"))
+                 return {**st, "final_answer": resp, "perf": {"time": time.time() - t0, "prov": "ERNIE"}}
+             except Exception as e:
+                 return {**st, "final_answer": f"ERNIE error: {e}", "perf": {"error": str(e)}}

        def pick(st: EnhancedAgentState) -> str:
            return st["agent_type"]
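
The new router only dispatches to a provider whose client actually initialized, falling back through Groq, NVIDIA, and Gemini before settling on DeepSeek, which needs no SDK. A standalone, simplified mirror of that logic for illustration (the function name and availability flags are hypothetical):

```python
def route_query(q: str, groq_ok: bool = True, nvidia_ok: bool = False,
                gemini_ok: bool = False) -> str:
    """Simplified mirror of the commit's keyword router."""
    q = q.lower()
    if "groq" in q and groq_ok:
        return "groq"
    if "nvidia" in q and nvidia_ok:
        return "nvidia"
    if ("gemini" in q or "google" in q) and gemini_ok:
        return "gemini"
    if "deepseek" in q:
        return "deepseek"
    if "ernie" in q or "baidu" in q:
        return "baidu"
    # Fall back to the first available provider; DeepSeek is the final
    # default because it is called over plain HTTP, not through an SDK.
    for name, ok in (("groq", groq_ok), ("nvidia", nvidia_ok), ("gemini", gemini_ok)):
        if ok:
            return name
    return "deepseek"

assert route_query("ask nvidia: why is the sky blue?", nvidia_ok=True) == "nvidia"
assert route_query("why is the sky blue?") == "groq"                 # default
assert route_query("why is the sky blue?", groq_ok=False) == "deepseek"
```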
@@ -202,12 +288,27 @@ class HybridLangGraphMultiLLMSystem:
            "agno_resp": ""
        }
        cfg = {"configurable": {"thread_id": f"hyb_{hash(q)}"}}
-         out = self.graph.invoke(state, cfg)
-         raw_answer = out["final_answer"]
-         parts = raw_answer.split('\n\n', 1)
-         answer_part = parts[1].strip() if len(parts) > 1 else raw_answer.strip()
-         return answer_part
+         try:
+             out = self.graph.invoke(state, cfg)
+             raw_answer = out.get("final_answer", "No answer generated")
+
+             # Clean up the answer
+             if isinstance(raw_answer, str):
+                 parts = raw_answer.split('\n\n')
+                 answer_part = parts[1].strip() if len(parts) > 1 and len(parts[1].strip()) > 10 else raw_answer.strip()
+                 return answer_part
+             return str(raw_answer)
+         except Exception as e:
+             return f"Error processing query: {e}"
+
+ # Function expected by app.py
+ def build_graph(provider="groq"):
+     """Build and return the graph for the agent system."""
+     system = HybridLangGraphMultiLLMSystem(provider=provider)
+     return system.graph

if __name__ == "__main__":
-     query = "What are the names of the US presidents who were assassinated? (groq)"
-     print("LangGraph Hybrid:", HybridLangGraphMultiLLMSystem().process_query(query))
+     query = "What are the names of the US presidents who were assassinated?"
+     system = HybridLangGraphMultiLLMSystem()
+     result = system.process_query(query)
+     print("LangGraph Hybrid Result:", result)