Alaaeldin committed (verified)
Commit 6b14cc6
1 Parent(s): 6cf51b7

Update app.py

Files changed (1)
  1. app.py +69 -307
app.py CHANGED
@@ -1,332 +1,94 @@
- from smolagents import tool
- import requests
- import json
  import datetime
- import os
- import base64
- from typing import List, Optional, Dict, Any
- import json
- import io
-
-
- @tool
- def web_scrape(url: str) -> str:
-     """Scrapes the content from a specified URL.
-
-     Args:
-         url: The URL to scrape content from.
-     """
-     try:
-         response = requests.get(url, headers={
-             'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
-         })
-         response.raise_for_status()
-         return response.text
-     except Exception as e:
-         return f"Error scraping {url}: {str(e)}"
-

  @tool
- def extract_structured_data(text: str, schema: str) -> str:
-     """Extracts structured data from text based on a provided schema.

      Args:
-         text: The text to extract data from.
-         schema: JSON schema describing the data structure to extract.
      """
      try:
-         # In a real implementation, you might use regex, NLP, or ML models
-         # This is a placeholder for demonstrating the concept
-         return f"Extracted structured data according to schema: {schema}"
-     except Exception as e:
-         return f"Error extracting structured data: {str(e)}"
-
-
- @tool
- def data_visualization(data: str, chart_type: str, title: str = "Data Visualization") -> str:
-     """Creates a textual representation of data for visualization.
-
-     Args:
-         data: JSON string with the data to visualize.
-         chart_type: Type of representation to create (summary, table, ascii).
-         title: Title for the visualization.
-     """
-     try:
-         # Parse the input data
-         data_parsed = json.loads(data)

-         # Create appropriate representation
-         result = f"=== {title} ===\n\n"

-         if chart_type.lower() == 'summary':
-             # Provide summary statistics
-             if isinstance(data_parsed, list):
-                 result += f"Number of items: {len(data_parsed)}\n"
-                 if len(data_parsed) > 0 and isinstance(data_parsed[0], dict):
-                     keys = data_parsed[0].keys()
-                     result += f"Fields: {', '.join(keys)}\n\n"
-
-                     # Try to get numeric values for min/max/avg
-                     for key in keys:
-                         try:
-                             values = [item[key] for item in data_parsed if isinstance(item[key], (int, float))]
-                             if values:
-                                 result += f"{key}:\n"
-                                 result += f" Min: {min(values)}\n"
-                                 result += f" Max: {max(values)}\n"
-                                 result += f" Avg: {sum(values)/len(values):.2f}\n\n"
-                         except (KeyError, TypeError):
-                             pass
-             else:
-                 result += f"Data summary: {str(data_parsed)[:100]}...\n"
-
-         elif chart_type.lower() == 'table':
-             # Create ASCII table
-             if isinstance(data_parsed, list) and len(data_parsed) > 0 and isinstance(data_parsed[0], dict):
-                 # Get headers
-                 headers = list(data_parsed[0].keys())
-
-                 # Calculate column widths
-                 col_widths = [len(h) for h in headers]
-                 for item in data_parsed:
-                     for i, h in enumerate(headers):
-                         if h in item:
-                             col_widths[i] = max(col_widths[i], len(str(item[h])))
-
-                 # Create header row
-                 header_row = " | ".join(h.ljust(col_widths[i]) for i, h in enumerate(headers))
-                 separator = "-+-".join("-" * w for w in col_widths)
-
-                 result += header_row + "\n"
-                 result += separator + "\n"
-
-                 # Create data rows
-                 for item in data_parsed[:10]:  # Limit to 10 rows for readability
-                     row = " | ".join(str(item.get(h, "")).ljust(col_widths[i]) for i, h in enumerate(headers))
-                     result += row + "\n"
-
-                 if len(data_parsed) > 10:
-                     result += f"\n... and {len(data_parsed) - 10} more rows"
-             else:
-                 result += "Data is not in a format suitable for table display"
-
-         elif chart_type.lower() == 'ascii':
-             # Create simple ASCII chart
-             if isinstance(data_parsed, list):
-                 # Try to extract x and y values
-                 x_values = []
-                 y_values = []
-
-                 # Attempt to detect data structure
-                 if len(data_parsed) > 0:
-                     if isinstance(data_parsed[0], dict) and len(data_parsed[0]) >= 2:
-                         # Use first two keys as x and y
-                         keys = list(data_parsed[0].keys())
-                         x_key, y_key = keys[0], keys[1]
-
-                         for item in data_parsed:
-                             try:
-                                 x_values.append(str(item[x_key]))
-                                 y_values.append(float(item[y_key]))
-                             except (KeyError, ValueError, TypeError):
-                                 continue
-                     elif isinstance(data_parsed[0], (list, tuple)) and len(data_parsed[0]) >= 2:
-                         # Use first two elements as x and y
-                         for item in data_parsed:
-                             try:
-                                 x_values.append(str(item[0]))
-                                 y_values.append(float(item[1]))
-                             except (IndexError, ValueError, TypeError):
-                                 continue
-
-                 if x_values and y_values:
-                     # Create a simple bar chart
-                     max_y = max(y_values)
-                     scale = 20 / max_y if max_y > 0 else 1
-
-                     result += f"Chart scale: Each * represents {1/scale:.2f} units\n\n"
-
-                     for i, (x, y) in enumerate(zip(x_values, y_values)):
-                         bar_length = int(y * scale)
-                         result += f"{x.ljust(10)}: {'*' * bar_length} ({y})\n"
-                 else:
-                     result += "Could not extract plottable values from the data"
-             else:
-                 result += "Data is not in a format suitable for ASCII chart display"
-         else:
-             return f"Unsupported visualization type: {chart_type}. Use 'summary', 'table', or 'ascii'."

-         return result
-     except Exception as e:
-         return f"Error creating visualization: {str(e)}"
-
-
- @tool
- def code_refactor(code: str, language: str, optimization: str) -> str:
-     """Refactors code based on specified optimization criteria.
-
-     Args:
-         code: The source code to refactor.
-         language: Programming language of the code.
-         optimization: Type of optimization to perform (performance, readability, security).
-     """
-     try:
-         # In a real implementation, you'd use language-specific tools or ML models
-         # This is a placeholder for demonstrating the concept
-         if optimization.lower() == 'performance':
-             return f"Code refactored for performance: \n```{language}\n# Performance optimized\n{code}\n```"
-         elif optimization.lower() == 'readability':
-             return f"Code refactored for readability: \n```{language}\n# Readability optimized\n{code}\n```"
-         elif optimization.lower() == 'security':
-             return f"Code refactored for security: \n```{language}\n# Security optimized\n{code}\n```"
-         else:
-             return f"Unsupported optimization type: {optimization}"
-     except Exception as e:
-         return f"Error refactoring code: {str(e)}"
-
-
- @tool
- def api_interaction(endpoint: str, method: str = "GET", params: Optional[str] = None, headers: Optional[str] = None) -> str:
-     """Interacts with an API endpoint.
-
-     Args:
-         endpoint: The API endpoint URL.
-         method: HTTP method (GET, POST, PUT, DELETE).
-         params: JSON string of parameters or data to send.
-         headers: JSON string of headers to include.
-     """
-     try:
-         # Parse headers and params if provided
-         headers_dict = json.loads(headers) if headers else {}

-         if method.upper() == "GET":
-             params_dict = json.loads(params) if params else {}
-             response = requests.get(endpoint, params=params_dict, headers=headers_dict)
-         elif method.upper() == "POST":
-             data_dict = json.loads(params) if params else {}
-             response = requests.post(endpoint, json=data_dict, headers=headers_dict)
-         elif method.upper() == "PUT":
-             data_dict = json.loads(params) if params else {}
-             response = requests.put(endpoint, json=data_dict, headers=headers_dict)
-         elif method.upper() == "DELETE":
-             response = requests.delete(endpoint, headers=headers_dict)
-         else:
-             return f"Unsupported HTTP method: {method}"

-         response.raise_for_status()

-         # Try to return JSON if possible, otherwise return text
-         try:
-             return json.dumps(response.json(), indent=2)
-         except:
-             return response.text
      except Exception as e:
-         return f"Error interacting with API {endpoint}: {str(e)}"
-

  @tool
- def natural_language_query(database_description: str, query: str) -> str:
-     """Translates a natural language query to structured data operations.

      Args:
-         database_description: Description of the database schema.
-         query: Natural language query about the data.
      """
      try:
-         # In a real implementation, you'd use NLP to SQL or similar technology
-         # This is a placeholder for demonstrating the concept
-         return f"Query translated and executed. Results for: {query}"
      except Exception as e:
-         return f"Error processing natural language query: {str(e)}"
-

- @tool
- def file_operations(operation: str, file_path: str, content: Optional[str] = None) -> str:
-     """Performs operations on files.
-
-     Args:
-         operation: The operation to perform (read, write, append, list).
-         file_path: Path to the file or directory.
-         content: Content to write or append (only for write/append operations).
-     """
-     try:
-         if operation.lower() == 'read':
-             with open(file_path, 'r') as file:
-                 return file.read()
-         elif operation.lower() == 'write':
-             if content is None:
-                 return "Content must be provided for write operation"
-             with open(file_path, 'w') as file:
-                 file.write(content)
-             return f"Content written to {file_path}"
-         elif operation.lower() == 'append':
-             if content is None:
-                 return "Content must be provided for append operation"
-             with open(file_path, 'a') as file:
-                 file.write(content)
-             return f"Content appended to {file_path}"
-         elif operation.lower() == 'list':
-             if os.path.isdir(file_path):
-                 return str(os.listdir(file_path))
-             else:
-                 return f"{file_path} is not a directory"
-         else:
-             return f"Unsupported file operation: {operation}"
-     except Exception as e:
-         return f"Error performing file operation: {str(e)}"


- @tool
- def semantic_search(corpus: str, query: str, top_k: int = 3) -> str:
-     """Performs semantic search on a corpus of text.

-     Args:
-         corpus: The text corpus to search within (could be a large text or list of documents).
-         query: The search query.
-         top_k: Number of top results to return.
-     """
-     try:
-         # In a real implementation, you'd use embedding models and vector similarity
-         # This is a placeholder for demonstrating the concept
-         results = [
-             {"text": f"Result {i} for query: {query}", "score": (top_k - i) / top_k}
-             for i in range(1, top_k + 1)
-         ]
-         return json.dumps(results, indent=2)
-     except Exception as e:
-         return f"Error performing semantic search: {str(e)}"
-
-
- @tool
- def weather_forecast(location: str) -> str:
-     """Fetches weather forecast for a specified location.
-
-     Args:
-         location: The location to get weather forecast for (city name or coordinates).
-     """
-     try:
-         # In a real implementation, you'd connect to a weather API
-         # This is a placeholder for demonstrating the concept
-         return f"Weather forecast for {location}: Sunny with a chance of AI"
-     except Exception as e:
-         return f"Error fetching weather forecast: {str(e)}"
-
-
- @tool
- def task_scheduler(task: str, schedule_time: str, priority: int = 1) -> str:
-     """Schedules a task to be performed at a specified time.
-
-     Args:
-         task: Description of the task to be scheduled.
-         schedule_time: Time to schedule the task (ISO format).
-         priority: Priority level of the task (1-5, where 1 is highest).
-     """
-     try:
-         # Parse the schedule time
-         schedule_datetime = datetime.datetime.fromisoformat(schedule_time)
-
-         # In a real implementation, you'd connect to a scheduling system
-         # This is a placeholder for demonstrating the concept
-         return f"Task '{task}' scheduled for {schedule_datetime} with priority {priority}"
-     except Exception as e:
-         return f"Error scheduling task: {str(e)}"
 
+ from smolagents import CodeAgent, tool
  import datetime
+ import pytz
+ import yaml
+ from tools.final_answer import FinalAnswerTool
+ from Gradio_UI import GradioUI

+ # Define a simple tool that doesn't rely on any external libraries
  @tool
+ def text_analyzer(text: str) -> str:
+     """Analyzes text and returns statistics about it.

      Args:
+         text: The text to analyze.
      """
      try:
+         # Simple word count
+         words = text.split()
+         word_count = len(words)

+         # Character count
+         char_count = len(text)

+         # Unique words
+         unique_words = len(set(word.lower() for word in words))

+         # Average word length
+         avg_word_length = sum(len(word) for word in words) / max(1, word_count)

+         # Most common words (top 5)
+         word_freq = {}
+         for word in words:
+             word_lower = word.lower()
+             word_freq[word_lower] = word_freq.get(word_lower, 0) + 1

+         common_words = sorted(word_freq.items(), key=lambda x: x[1], reverse=True)[:5]
+         common_words_str = ", ".join(f"{word} ({count})" for word, count in common_words)

+         return f"""Text Analysis Results:
+ - Word count: {word_count}
+ - Character count: {char_count}
+ - Unique words: {unique_words}
+ - Average word length: {avg_word_length:.2f}
+ - Most common words: {common_words_str}
+ """
      except Exception as e:
+         return f"Error analyzing text: {str(e)}"

  @tool
+ def get_current_time_in_timezone(timezone: str) -> str:
+     """A tool that fetches the current local time in a specified timezone.

      Args:
+         timezone: A string representing a valid timezone (e.g., 'America/New_York').
      """
      try:
+         # Create timezone object
+         tz = pytz.timezone(timezone)
+         # Get current time in that timezone
+         local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
+         return f"The current local time in {timezone} is: {local_time}"
      except Exception as e:
+         return f"Error fetching time for timezone '{timezone}': {str(e)}"

+ # Set up the agent with minimal tools
+ final_answer = FinalAnswerTool()

+ with open("prompts.yaml", 'r') as stream:
+     prompt_templates = yaml.safe_load(stream)

+ from smolagents import HfApiModel

+ model = HfApiModel(
+     max_tokens=2096,
+     temperature=0.5,
+     model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
+     custom_role_conversions=None,
+ )
+
+ # Create agent with minimal tools
+ agent = CodeAgent(
+     model=model,
+     tools=[text_analyzer, get_current_time_in_timezone, final_answer],
+     max_steps=6,
+     verbosity_level=1,
+     grammar=None,
+     planning_interval=None,
+     name=None,
+     description=None,
+     prompt_templates=prompt_templates
+ )
+
+ # Launch the Gradio UI
+ GradioUI(agent).launch()