Threatthriver committed on
Commit 114a365 · verified · 1 Parent(s): 321be4e

Update app.py

Files changed (1)
  1. app.py +284 -99
app.py CHANGED
@@ -1,80 +1,226 @@
  import gradio as gr
  import os
  import time
  from groq import Groq
  import requests
  from bs4 import BeautifulSoup
  from urllib.parse import urljoin, urlparse
  import re
  import json

- # API Setup
  GROQ_API_KEY = os.getenv("GROQ_API_KEY")
- if not GROQ_API_KEY:
-     raise ValueError("GROQ_API_KEY environment variable is not set.")

- client = Groq(api_key=GROQ_API_KEY)

- # Helper Functions
- def is_valid_url(url):
-     try:
-         result = urlparse(url)
-         return all([result.scheme, result.netloc])
-     except ValueError:
-         return False

- def fetch_webpage(url):
-     try:
-         response = requests.get(url, timeout=10)
-         response.raise_for_status()
-         return response.text
-     except requests.exceptions.RequestException as e:
-         return f"Error fetching URL: {e}"
-
- def extract_text_from_html(html):
-     soup = BeautifulSoup(html, 'html.parser')
-     text = soup.get_text(separator=' ', strip=True)
-     return text
-
- # Chat Logic
- async def chat_with_agent(user_input, chat_history):
      start_time = time.time()
      try:
-         # Prepare chat history
-         formatted_history = "\n".join([f"User: {msg[0]}\nAI: {msg[1]}" for msg in chat_history[-10:]])
-
-         system_prompt = """You are TaskMaster, an advanced agentic AI designed to help users accomplish their goals through:
-         1. Understanding and breaking down complex tasks
-         2. Using available tools effectively
-         3. Providing creative solutions with occasional humor
-         4. Maintaining context and adapting to user needs
-
-         Available tools:
-         - Web scraping (URL required)
-         - Internet search simulation

-         You can take actions using:
-         Action: take_action, Parameters: {"action":"scrape", "url":"https://example.com"}
-         Action: take_action, Parameters: {"action":"search_internet", "query":"search query"}"""

          messages = [
              {"role": "system", "content": system_prompt},
              {"role": "user", "content": user_input}
          ]

-         completion = client.chat.completions.create(
-             messages=messages,
-             temperature=0.7,
-             max_tokens=2048,
-             stream=True,
-             stop=None
-         )

          response = ""
          chain_of_thought = ""
-         tool_execution_count = 0

-         for chunk in completion:
              if chunk.choices[0].delta and chunk.choices[0].delta.content:
                  content = chunk.choices[0].delta.content
                  response += content
@@ -82,101 +228,140 @@ async def chat_with_agent(user_input, chat_history):
                  if "Chain of Thought:" in content:
                      chain_of_thought += content.split("Chain of Thought:", 1)[-1]

-                 if "Action:" in content and tool_execution_count < 3:
                      action_match = re.search(r"Action: (\w+), Parameters: (\{.*\})", content)
                      if action_match:
-                         tool_execution_count += 1
                          action = action_match.group(1)
-                         parameters = json.loads(action_match.group(2))
-
-                         if action == "take_action":
-                             if parameters.get("action") == "scrape":
-                                 url = parameters.get("url")
-                                 if is_valid_url(url):
-                                     html_content = fetch_webpage(url)
-                                     if not html_content.startswith("Error"):
-                                         webpage_text = extract_text_from_html(html_content)
-                                         response += f"\nWebpage Content: {webpage_text[:1000]}...\n"
-                                     else:
-                                         response += f"\nError scraping webpage: {html_content}\n"
-                                 else:
-                                     response += "\nInvalid URL provided.\n"
-                             elif parameters.get("action") == "search_internet":
-                                 query = parameters.get("query")
-                                 response += f"\nSearching for: {query}\nSimulated search results would appear here.\n"

          compute_time = time.time() - start_time
-         token_usage = len(user_input.split()) + len(response.split())
-         return response, chain_of_thought, f"Compute Time: {compute_time:.2f} seconds", f"Tokens used: {token_usage}"

      except Exception as e:
          return f"Error: {str(e)}", "", "Error occurred", ""

- # Gradio Interface
  def create_interface():
      with gr.Blocks(theme=gr.themes.Soft()) as demo:
-         gr.Markdown("""# 🤖 TaskMaster: Your AI Assistant
-         Let me help you accomplish your goals through intelligent task execution!""")

          with gr.Row():
              with gr.Column(scale=6):
-                 chat_history = gr.Chatbot(label="Chat History", height=600)
              with gr.Column(scale=2):
-                 compute_time = gr.Textbox(label="Performance Metrics", interactive=False)
-                 chain_of_thought_display = gr.Textbox(label="Reasoning Process", interactive=False, lines=10)
-                 token_usage_display = gr.Textbox(label="Resource Usage", interactive=False)

          user_input = gr.Textbox(
              label="Your Request",
-             placeholder="What would you like me to help you with?",
-             lines=2
          )

          with gr.Row():
              send_button = gr.Button("Send", variant="primary")
-             clear_button = gr.Button("Clear")
-             export_button = gr.Button("Save Chat")

          async def handle_chat(chat_history, user_input):
              if not user_input.strip():
                  return chat_history, "", "", ""

-             ai_response, chain_of_thought, compute_info, token_usage = await chat_with_agent(user_input, chat_history)
              chat_history.append((user_input, ai_response))
              return chat_history, chain_of_thought, compute_info, token_usage

          def clear_chat():
              return [], "", "", ""

          def export_chat(chat_history):
              if not chat_history:
                  return "No chat history to export.", ""

-             filename = f"taskmaster_chat_{int(time.time())}.txt"
-             chat_text = "\n".join([f"User: {item[0]}\nAI: {item[1]}\n" for item in chat_history])

              with open(filename, "w") as file:
                  file.write(chat_text)
-             return f"Chat saved to {filename}", ""

          # Event handlers
-         send_button.click(handle_chat, inputs=[chat_history, user_input], outputs=[chat_history, chain_of_thought_display, compute_time, token_usage_display])
-         clear_button.click(clear_chat, outputs=[chat_history, chain_of_thought_display, compute_time, token_usage_display])
-         export_button.click(export_chat, inputs=[chat_history], outputs=[compute_time, chain_of_thought_display])
-         user_input.submit(handle_chat, inputs=[chat_history, user_input], outputs=[chat_history, chain_of_thought_display, compute_time, token_usage_display])
-
-         gr.Markdown("""### 🌟 Capabilities:
-         - Task Analysis & Breakdown
-         - Web Information Retrieval
-         - Creative Problem-Solving
          - Context-Aware Responses
-         - Performance Tracking
-         - Chat Export
          """)

      return demo

- # Launch the application
  if __name__ == "__main__":
      demo = create_interface()
-     demo.launch()
  import gradio as gr
  import os
  import time
+ import asyncio
+ from cerebras.cloud.sdk import Cerebras
  from groq import Groq
  import requests
  from bs4 import BeautifulSoup
  from urllib.parse import urljoin, urlparse
  import re
  import json
+ import numpy as np
+ from datetime import datetime
+ import logging
+ import aiohttp

+ # Enhanced API Setup
+ CEREBRAS_API_KEY = os.getenv("CEREBRAS_API_KEY")
  GROQ_API_KEY = os.getenv("GROQ_API_KEY")

+ if not CEREBRAS_API_KEY or not GROQ_API_KEY:
+     raise ValueError("Both CEREBRAS_API_KEY and GROQ_API_KEY environment variables must be set.")

+ cerebras_client = Cerebras(api_key=CEREBRAS_API_KEY)
+ groq_client = Groq(api_key=GROQ_API_KEY)

+ # Configure logging
+ logging.basicConfig(
+     level=logging.INFO,
+     format='%(asctime)s - %(levelname)s - %(message)s',
+     filename='agent.log'
+ )
+
+ class EnhancedToolkit:
+     @staticmethod
+     async def fetch_webpage_async(url, timeout=10):
+         try:
+             async with aiohttp.ClientSession() as session:
+                 async with session.get(url, timeout=timeout) as response:
+                     if response.status == 200:
+                         return await response.text()
+                     return f"Error: HTTP {response.status}"
+         except Exception as e:
+             return f"Error fetching URL: {str(e)}"
+
+     @staticmethod
+     def extract_text_from_html(html):
+         soup = BeautifulSoup(html, 'html.parser')
+         # Remove script and style elements
+         for script in soup(["script", "style"]):
+             script.decompose()
+         text = soup.get_text(separator=' ', strip=True)
+         # Normalize whitespace
+         text = ' '.join(text.split())
+         return text
+
+     @staticmethod
+     def validate_url(url):
+         try:
+             result = urlparse(url)
+             return all([result.scheme, result.netloc])
+         except ValueError:
+             return False
+
+     @staticmethod
+     def summarize_text(text, max_length=500):
+         """Simple text summarization by extracting key sentences"""
+         sentences = text.split('. ')
+         if len(sentences) <= 3:
+             return text
+
+         # Simple importance scoring based on sentence length and position
+         scores = []
+         for i, sentence in enumerate(sentences):
+             score = len(sentence.split()) * (1.0 / (i + 1))  # Length and position weight
+             scores.append((score, sentence))
+
+         # Get top sentences
+         scores.sort(reverse=True)
+         summary = '. '.join(sent for _, sent in scores[:3]) + '.'
+         return summary
+
+     @staticmethod
+     def analyze_sentiment(text):
+         """Simple sentiment analysis"""
+         positive_words = set(['good', 'great', 'excellent', 'positive', 'amazing', 'wonderful'])
+         negative_words = set(['bad', 'poor', 'negative', 'terrible', 'awful', 'horrible'])
+
+         words = text.lower().split()
+         pos_count = sum(1 for word in words if word in positive_words)
+         neg_count = sum(1 for word in words if word in negative_words)
+
+         if pos_count > neg_count:
+             return 'positive'
+         elif neg_count > pos_count:
+             return 'negative'
+         return 'neutral'
+
+ class AgentCore:
+     def __init__(self):
+         self.toolkit = EnhancedToolkit()
+         self.tool_execution_count = 0
+         self.max_tools_per_turn = 5
+         self.context_window = []
+         self.max_context_items = 10
+
+     def update_context(self, user_input, ai_response):
+         self.context_window.append({
+             'user_input': user_input,
+             'ai_response': ai_response,
+             'timestamp': datetime.now().isoformat()
+         })
+         if len(self.context_window) > self.max_context_items:
+             self.context_window.pop(0)
+
+     async def execute_tool(self, action, parameters):
+         if self.tool_execution_count >= self.max_tools_per_turn:
+             return "Tool usage limit reached for this turn."
+
+         self.tool_execution_count += 1
+
+         if action == "scrape":
+             url = parameters.get("url")
+             if not self.toolkit.validate_url(url):
+                 return "Invalid URL provided."
+
+             html_content = await self.toolkit.fetch_webpage_async(url)
+             if html_content.startswith("Error"):
+                 return html_content
+
+             text_content = self.toolkit.extract_text_from_html(html_content)
+             summary = self.toolkit.summarize_text(text_content)
+             sentiment = self.toolkit.analyze_sentiment(text_content)
+
+             return {
+                 'summary': summary,
+                 'sentiment': sentiment,
+                 'full_text': text_content[:1000] + '...' if len(text_content) > 1000 else text_content
+             }
+
+         elif action == "search":
+             query = parameters.get("query")
+             return f"Simulated search for: {query}\nThis would connect to a search API in production."
+
+         elif action == "analyze":
+             text = parameters.get("text")
+             if not text:
+                 return "No text provided for analysis"
+
+             return {
+                 'sentiment': self.toolkit.analyze_sentiment(text),
+                 'summary': self.toolkit.summarize_text(text)
+             }
+
+         return f"Unknown tool: {action}"
+
+ async def chat_with_agent(user_input, chat_history, agent_core):
      start_time = time.time()
      try:
+         # Reset tool counter for new turn
+         agent_core.tool_execution_count = 0

+         # Prepare context-aware prompt
+         system_prompt = """You are OmniAgent, a highly advanced AI assistant with multiple capabilities:
+
+         Core Abilities:
+         1. Task Understanding & Planning
+         2. Web Information Retrieval & Analysis
+         3. Content Summarization & Sentiment Analysis
+         4. Context-Aware Problem Solving
+         5. Creative Solution Generation
+
+         Available Tools:
+         - scrape: Extract and analyze web content
+         - search: Find relevant information
+         - analyze: Process and understand text
+
+         Use format:
+         Action: take_action
+         Parameters: {"action": "tool_name", "parameters": {...}}
+
+         Approach each task with:
+         1. Initial analysis
+         2. Step-by-step planning
+         3. Tool utilization when needed
+         4. Result synthesis
+         5. Clear explanation
+
+         Remember to maintain a helpful, professional, yet friendly tone."""

          messages = [
              {"role": "system", "content": system_prompt},
              {"role": "user", "content": user_input}
          ]

+         # Use both models for different aspects of processing
+         async def get_cerebras_response():
+             response = cerebras_client.completions.create(
+                 prompt=f"{system_prompt}\n\nUser: {user_input}",
+                 max_tokens=1000,
+                 temperature=0.7
+             )
+             return response.text
+
+         async def get_groq_response():
+             completion = groq_client.chat.completions.create(
+                 messages=messages,
+                 temperature=0.7,
+                 max_tokens=2048,
+                 stream=True
+             )
+             return completion
+
+         # Get responses from both models
+         cerebras_future = asyncio.create_task(get_cerebras_response())
+         groq_stream = await get_groq_response()

+         # Process responses
          response = ""
          chain_of_thought = ""

+         # Process Groq stream
+         for chunk in groq_stream:
              if chunk.choices[0].delta and chunk.choices[0].delta.content:
                  content = chunk.choices[0].delta.content
                  response += content
                  if "Chain of Thought:" in content:
                      chain_of_thought += content.split("Chain of Thought:", 1)[-1]

+                 # Tool execution handling
+                 if "Action:" in content:
                      action_match = re.search(r"Action: (\w+), Parameters: (\{.*\})", content)
                      if action_match:
                          action = action_match.group(1)
+                         try:
+                             parameters = json.loads(action_match.group(2))
+                             tool_result = await agent_core.execute_tool(
+                                 parameters.get("action"),
+                                 parameters.get("parameters", {})
+                             )
+                             response += f"\nTool Result: {json.dumps(tool_result, indent=2)}\n"
+                         except json.JSONDecodeError:
+                             response += "\nError: Invalid tool parameters\n"
+
+         # Integrate Cerebras response
+         cerebras_response = await cerebras_future
+
+         # Combine insights from both models
+         final_response = f"{response}\n\nAdditional Insights:\n{cerebras_response}"
+
+         # Update context
+         agent_core.update_context(user_input, final_response)

          compute_time = time.time() - start_time
+         token_usage = len(user_input.split()) + len(final_response.split())
+
+         return final_response, chain_of_thought, f"Compute Time: {compute_time:.2f}s", f"Tokens: {token_usage}"

      except Exception as e:
+         logging.error(f"Error in chat_with_agent: {str(e)}", exc_info=True)
          return f"Error: {str(e)}", "", "Error occurred", ""

  def create_interface():
      with gr.Blocks(theme=gr.themes.Soft()) as demo:
+         agent_core = AgentCore()
+
+         gr.Markdown("""# 🌟 OmniAgent: Advanced AI Assistant
+         Powered by dual AI models for enhanced capabilities and deeper understanding.""")

          with gr.Row():
              with gr.Column(scale=6):
+                 chat_history = gr.Chatbot(
+                     label="Interaction History",
+                     height=600,
+                     show_label=True
+                 )
              with gr.Column(scale=2):
+                 with gr.Accordion("Performance Metrics", open=True):
+                     compute_time = gr.Textbox(label="Processing Time", interactive=False)
+                     token_usage_display = gr.Textbox(label="Resource Usage", interactive=False)
+                 with gr.Accordion("Agent Insights", open=True):
+                     chain_of_thought_display = gr.Textbox(
+                         label="Reasoning Process",
+                         interactive=False,
+                         lines=10
+                     )

          user_input = gr.Textbox(
              label="Your Request",
+             placeholder="How can I assist you today?",
+             lines=3
          )

          with gr.Row():
              send_button = gr.Button("Send", variant="primary")
+             clear_button = gr.Button("Clear History", variant="secondary")
+             export_button = gr.Button("Export Chat", variant="secondary")

          async def handle_chat(chat_history, user_input):
              if not user_input.strip():
                  return chat_history, "", "", ""

+             ai_response, chain_of_thought, compute_info, token_usage = await chat_with_agent(
+                 user_input,
+                 chat_history,
+                 agent_core
+             )
+
              chat_history.append((user_input, ai_response))
              return chat_history, chain_of_thought, compute_info, token_usage

          def clear_chat():
+             agent_core.context_window.clear()
              return [], "", "", ""

          def export_chat(chat_history):
              if not chat_history:
                  return "No chat history to export.", ""

+             filename = f"omnigent_chat_{int(time.time())}.txt"
+             chat_text = "\n".join([
+                 f"User: {item[0]}\nAI: {item[1]}\n"
+                 for item in chat_history
+             ])

              with open(filename, "w") as file:
                  file.write(chat_text)
+             return f"Chat exported to {filename}", ""

          # Event handlers
+         send_button.click(
+             handle_chat,
+             inputs=[chat_history, user_input],
+             outputs=[chat_history, chain_of_thought_display, compute_time, token_usage_display]
+         )
+         clear_button.click(
+             clear_chat,
+             outputs=[chat_history, chain_of_thought_display, compute_time, token_usage_display]
+         )
+         export_button.click(
+             export_chat,
+             inputs=[chat_history],
+             outputs=[compute_time, chain_of_thought_display]
+         )
+         user_input.submit(
+             handle_chat,
+             inputs=[chat_history, user_input],
+             outputs=[chat_history, chain_of_thought_display, compute_time, token_usage_display]
+         )
+
+         gr.Markdown("""### 🚀 Advanced Capabilities:
+         - Dual AI Model Processing
+         - Advanced Web Content Analysis
+         - Sentiment Understanding
+         - Intelligent Text Summarization
          - Context-Aware Responses
+         - Enhanced Error Handling
+         - Detailed Performance Tracking
+         - Comprehensive Logging
          """)

      return demo

  if __name__ == "__main__":
      demo = create_interface()
+     demo.launch(share=True)
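
Both versions extract tool calls from the streamed text with the same regular expression, which only matches a single-line `Action: ..., Parameters: {...}` directive. The new system prompt, by contrast, shows the directive split across two lines. The following standalone sketch illustrates what the pattern does and does not match; the example strings are hypothetical, not output from either model:

```python
import json
import re

# Same pattern used in app.py's streaming loop.
TOOL_CALL = re.compile(r"Action: (\w+), Parameters: (\{.*\})")

# Single-line form, as shown in the original TaskMaster prompt (hypothetical example).
single_line = 'Action: take_action, Parameters: {"action": "scrape", "url": "https://example.com"}'
match = TOOL_CALL.search(single_line)
if match:
    action = match.group(1)              # "take_action"
    params = json.loads(match.group(2))  # parsed JSON parameters
    print(action, params)

# Two-line form, as the new OmniAgent prompt requests. The same pattern finds no
# match here, so a response streamed in this format would not trigger a tool call.
two_line = 'Action: take_action\nParameters: {"action": "scrape", "parameters": {"url": "https://example.com"}}'
print(TOOL_CALL.search(two_line))  # None
```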
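The new `chat_with_agent` also starts the Cerebras request as a background task, consumes the Groq stream, and only then awaits the secondary result, appending it as "Additional Insights". Below is a minimal sketch of that orchestration pattern using placeholder coroutines instead of the real SDK clients (the function names and timings are hypothetical). Note that if the secondary call is a synchronous SDK method, it would need to be offloaded (for example via `asyncio.to_thread`) to actually run concurrently with the stream:

```python
import asyncio

# Stand-in for the secondary (Cerebras-style) call; hypothetical placeholder.
async def slow_secondary_call():
    await asyncio.sleep(0.2)
    return "secondary insights"

# Stand-in for the streaming (Groq-style) call; hypothetical placeholder.
async def stream_primary_call():
    for token in ["Hello", ", ", "world"]:
        await asyncio.sleep(0.05)
        yield token

async def main():
    # Kick off the secondary call first so it can run while the stream is consumed.
    secondary = asyncio.create_task(slow_secondary_call())

    response = ""
    async for token in stream_primary_call():
        response += token

    # Await the secondary result only after the stream finishes, mirroring how
    # app.py combines the two outputs into one final response.
    extra = await secondary
    print(f"{response}\n\nAdditional Insights:\n{extra}")

asyncio.run(main())
```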