from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool

import datetime
import json
import os
import requests
import pytz
import yaml
from typing import Dict, Any, Optional, List

from requests.exceptions import ConnectionError, Timeout, TooManyRedirects

from tools.final_answer import FinalAnswerTool
from Gradio_UI import GradioUI

verbose = True
if verbose: print("Running app.py")

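# smolagents builds each tool's schema from the decorated function itself: the type hints
# and the "Args:" section of the docstring are what the model gets to see, so every
# argument needs both.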
@tool
def my_custom_tool(arg1: str, arg2: int) -> str:
    """A tool that does nothing yet.
    Args:
        arg1: the first argument
        arg2: the second argument
    """
    return "What magic will you build?"

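# fetch_active_crypto queries CoinMarketCap's listings endpoint and returns the quotes
# grouped into chunks so the agent can work through them piece by piece. The result is
# roughly shaped like this (illustrative values only):
#   [
#       {"Bitcoin": {"price": ..., "market_cap": ...}, "Ethereum": {...}, ...},  # chunk 1
#       {...},                                                                   # chunk 2, and so on
#   ]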
@tool
def fetch_active_crypto(currency: str = 'USD', chunk_size: int = 100) -> Optional[List[Dict[str, Any]]]:
    """A tool that fetches all active cryptocurrencies and sorts them by market cap in descending order.
    Args:
        currency: A string representing the currency the quotes are returned in (default: 'USD').
        chunk_size: The number of cryptocurrencies to include in each chunk (default: 100).
    Returns:
        Optional[List[Dict[str, Any]]]: A list of dictionaries containing the top cryptocurrencies by market cap,
        chunked into smaller pieces, or None if an error occurs.
    """
    url = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest'
    parameters = {
        'start': '1',
        'limit': '5000',
        'convert': currency
    }
    headers = {
        'Accepts': 'application/json',
        # CoinMarketCap API key, read from the environment rather than hard-coded in source.
        'X-CMC_PRO_API_KEY': os.getenv('CMC_PRO_API_KEY', ''),
    }

    session = requests.Session()
    session.headers.update(headers)

    try:
        response = session.get(url, params=parameters, timeout=10)
        response.raise_for_status()
        data = json.loads(response.text)

        if 'data' in data:
            # Sort by market cap, largest first; treat a missing market cap as 0.
            sorted_crypto = sorted(
                data['data'],
                key=lambda x: x['quote'][currency]['market_cap'] or 0,
                reverse=True,
            )

            # Split the sorted list into chunks of `chunk_size` entries.
            chunks = [sorted_crypto[i:i + chunk_size] for i in range(0, len(sorted_crypto), chunk_size)]

            result = []
            for chunk in chunks:
                # Map each coin's name to its quote (price, market cap, ...) in the requested currency.
                chunk_dict = {crypto['name']: crypto['quote'][currency] for crypto in chunk}
                result.append(chunk_dict)

            return result
        else:
            print("No data found in the response.")
            return None

    except (ConnectionError, Timeout, TooManyRedirects, requests.exceptions.HTTPError) as e:
        print(f"An error occurred: {e}")
        return None

@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """A tool that fetches the current local time in a specified timezone.
    Args:
        timezone: A string representing a valid timezone (e.g., 'America/New_York').
    """
    try:
        tz = pytz.timezone(timezone)
        local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
        return f"The current local time in {timezone} is: {local_time}"
    except Exception as e:
        return f"Error fetching time for timezone '{timezone}': {str(e)}"

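# The agent ends every run by calling final_answer, so FinalAnswerTool has to be part of
# the tools list passed to CodeAgent below.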
final_answer = FinalAnswerTool()

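# Candidate model ids on the Hugging Face Inference API. get_available_model() below picks
# the first one that responds to a test call and falls back to the first entry otherwise.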
MODEL_IDS = [
    'Qwen/Qwen2.5-Coder-14B-Instruct',
    'Qwen/Qwen2.5-Coder-3B-Instruct',
    'Qwen/Qwen2.5-Coder-7B-Instruct',
    'Qwen/Qwen2.5-Coder-32B-Instruct',
    'Qwen/Qwen2.5-Coder-1.5B-Instruct',
]

def is_model_overloaded(model_id):
    """Check whether a model is overloaded or unavailable by making a test call."""
    # MODEL_IDS holds model ids, not URLs, so build the Inference API endpoint for the probe.
    model_url = f"https://api-inference.huggingface.co/models/{model_id}"
    try:
        response = requests.post(model_url, json={"inputs": "Test"}, timeout=10)
        if verbose:
            print(response.status_code)
        # 503 (loading/overloaded), 404 (not found) and 424 (failed dependency) all mean
        # the model cannot serve requests right now.
        if response.status_code in (503, 404, 424):
            return True
        return False
    except requests.RequestException:
        return True

def get_available_model():
    """Select the first model from MODEL_IDS that passes the availability check, falling back to the first entry."""
    for model_id in MODEL_IDS:
        print("trying", model_id)
        if not is_model_overloaded(model_id):
            return model_id
    return MODEL_IDS[0]

if verbose: print("Checking available models.")
selected_model_id = get_available_model()
if verbose: print(f"Selected: {selected_model_id}")

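# HfApiModel talks to the Hugging Face Inference API; max_tokens caps the length of each
# generation and temperature=0.5 keeps the generated code fairly deterministic.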
model = HfApiModel(
    max_tokens=1048,
    temperature=0.5,
    model_id=selected_model_id,
    custom_role_conversions=None,
)

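# Import the text-to-image tool from the agents-course Space on the Hub; trust_remote_code=True
# is needed because the tool's code is downloaded and executed locally.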
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)

with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)

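# CodeAgent plans by writing and executing Python snippets that call the tools listed below;
# max_steps bounds that loop and prompt_templates (loaded above) replaces the default prompts.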
agent = CodeAgent(
    model=model,
    tools=[final_answer, image_generation_tool, get_current_time_in_timezone, fetch_active_crypto],
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates,
)

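# GradioUI (from Gradio_UI.py) wraps the agent in a chat interface; launch() starts the app.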
GradioUI(agent).launch()