from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool
import datetime
import requests
import pytz
import yaml
from tools.final_answer import FinalAnswerTool
from tools.web_search import DuckDuckGoSearchTool
from tools.visit_webpage import VisitWebpageTool
from Gradio_UI import GradioUI
# Below is an example of a tool that does nothing. Amaze us with your creativity!
@tool
def my_custom_tool(arg1: str, arg2: int) -> str:  # it's important to specify the return type
    # Keep this format for the description / args / args description, but feel free to modify the tool
    """A tool that does nothing yet.
    Args:
        arg1: the first argument
        arg2: the second argument
    """
    return "What magic will you build?"
@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """A tool that fetches the current local time in a specified timezone.
    Args:
        timezone: A string representing a valid timezone (e.g., 'America/New_York').
    """
    try:
        # Create timezone object
        tz = pytz.timezone(timezone)
        # Get current time in that timezone
        local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
        return f"The current local time in {timezone} is: {local_time}"
    except Exception as e:
        return f"Error fetching time for timezone '{timezone}': {str(e)}"
webpage = VisitWebpageTool()
search = DuckDuckGoSearchTool()
final_answer = FinalAnswerTool()
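# Note: these tool instances (and the @tool functions above) are only available to the agent
# if they are passed in the `tools` list of the CodeAgent created further down.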
# If the agent does not answer, the model is overloaded; use another model or the following
# Hugging Face endpoint, which also serves Qwen2.5 Coder:
# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
# model_id='Qwen/Qwen2.5-Coder-32B-Instruct',# it is possible that this model may be overloaded
# deepseek-ai/DeepSeek-R1-Distill-Qwen-32B
# deepseek-ai/DeepSeek-R1
# https://api-inference.huggingface.co/models/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B/v1/chat/completions
MODEL_IDS = [
#'https://wxknx1kg971u7k1n.us-east-1.aws.endpoints.huggingface.cloud/',
#'https://jc26mwg228mkj8dw.us-east-1.aws.endpoints.huggingface.cloud/',
#'meta-llama/Llama-3.2-1B-Instruct', ## Does a poor job of interpreting my questions and matching them to the tools
'Qwen/Qwen2.5-Coder-32B-Instruct',
'Qwen/Qwen2.5-Coder-14B-Instruct',
'Qwen/Qwen2.5-Coder-7B-Instruct',
'Qwen/Qwen2.5-Coder-3B-Instruct',
'Qwen/Qwen2.5-Coder-1.5B-Instruct'
# Add whichever model works for you here
]
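# The list above is ordered from largest to smallest model, so the fallback below tries the most
# capable option first and degrades gracefully when it is overloaded.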
def is_model_overloaded(model_url, verbose=False):
    """Check whether the model is overloaded by making a test call."""
    # Bare model IDs are turned into Inference API URLs; full endpoint URLs are used as-is.
    if not model_url.startswith("http"):
        model_url = f"https://api-inference.huggingface.co/models/{model_url}"
    try:
        response = requests.post(model_url, json={"inputs": "Test"})
        if verbose:
            print(response.status_code)
        if response.status_code == 503:  # 503 Service Unavailable = overloaded
            return True
        if response.status_code == 404:  # 404 Not Found
            return True
        if response.status_code == 424:  # 424 Failed Dependency
            return True
        return False
    except requests.RequestException:
        return True  # treat any request error as "overloaded"
def get_available_model():
    """Select the first available model from the list."""
    for model_url in MODEL_IDS:
        print("trying", model_url)
        if not is_model_overloaded(model_url):
            return model_url
    return MODEL_IDS[0]  # if all are failing, use the first model by default
selected_model_id = get_available_model()
model = HfApiModel(
    max_tokens=2096,
    temperature=0.5,
    model_id=selected_model_id,
    custom_role_conversions=None,
)
# Import tool from Hub
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)
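# prompts.yaml ships with this template and holds the prompt templates (e.g. the system prompt)
# used by the CodeAgent; edit it to customise the agent's behaviour.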
agent = CodeAgent(
    model=model,
    tools=[image_generation_tool, final_answer],  # add your tools here (don't remove final_answer)
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates
)
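# GradioUI wraps the agent in a simple chat interface; launch() starts the web server
# (on a Space, this is what serves the app).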
GradioUI(agent).launch()