from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool
import datetime
import requests
import pytz
import yaml
from tools.final_answer import FinalAnswerTool
from Gradio_UI import GradioUI


# 1 - Timezone tool
@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """A tool that fetches the current local time in a specified timezone.
    Args:
        timezone: A string representing a valid timezone (e.g., 'America/New_York').
    """
    try:
        # Create timezone object using pytz library
        tz = pytz.timezone(timezone)
        # Get current time in that timezone and format it as a readable string
        local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
        # Return formatted response with the timezone and current time
        return f"The current local time in {timezone} is: {local_time}"
    except Exception as e:
        # Handle any errors that might occur (invalid timezone, etc.)
        return f"Error fetching time for timezone '{timezone}': {str(e)}"


# 2 - Image generation
@tool
def generate_image_from_text(prompt: str) -> str:
    """A tool that generates an image based on a text description.
    Args:
        prompt: A detailed text description of the image you want to generate.
    """
    try:
        # Call the image generation tool loaded from Hugging Face Hub
        # (the tool is loaded further down in the script before it's used here)
        result = image_generation_tool(prompt)
        # Return success message with the result (which should contain image URL or path)
        return f"Image generated successfully: {result}"
    except Exception as e:
        # Handle any errors that occur during image generation
        return f"Error generating image: {str(e)}"


# 3 - Web search
# Initialize the DuckDuckGo search tool
search_tool = DuckDuckGoSearchTool()


@tool
def search_web(query: str) -> str:
    """A tool that searches the web using DuckDuckGo for information.
    Args:
        query: The search query to find information on the web.
    """
    try:
        # Execute the search query using DuckDuckGo
        search_results = search_tool(query)
        # Format and return the search results
        return f"Search results for '{query}':\n\n{search_results}"
    except Exception as e:
        # Handle any errors that occur during the search
        return f"Error searching the web: {str(e)}"


# This tool is required for the agent to provide final answers
final_answer = FinalAnswerTool()

# Model configuration
# If the agent stops answering, the hosted model is likely overloaded.
# Alternative endpoint: 'https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
model = HfApiModel(
    max_tokens=2096,  # Maximum number of tokens in the response
    temperature=0.5,  # Controls randomness: lower = more deterministic
    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',  # Using the Qwen 2.5 Coder model
    custom_role_conversions=None,
)
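
# If the default model is overloaded, the alternative endpoint noted above can be used
# instead. Sketch (an assumption, not verified here): HfApiModel passes model_id to
# huggingface_hub's InferenceClient, which also accepts a deployed Inference Endpoint URL:
# model = HfApiModel(
#     max_tokens=2096,
#     temperature=0.5,
#     model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud',
#     custom_role_conversions=None,
# )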

# Load External Tools
# Import the image generation tool from Hugging Face Hub
# This tool will be used by the generate_image_from_text function
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)

# Load Prompt Templates
# Load prompt templates from YAML file for consistent agent responses
with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)

# Agent Configuration
agent = CodeAgent(
    model=model,
    tools=[
        get_current_time_in_timezone,  # Tool 1: Time zone tool
        generate_image_from_text,      # Tool 2: Image generation tool
        search_web,                     # Tool 3: Web search tool
        final_answer                    # Required final answer tool
    ],
    max_steps=6,             # Maximum number of reasoning steps
    verbosity_level=1,       # Level of detail in agent's output
    grammar=None,            # No specific grammar constraints
    planning_interval=None,  # No specific planning interval
    name=None,               # No custom agent name
    description=None,        # No custom agent description
    prompt_templates=prompt_templates  # Using loaded prompt templates
)

# Launch GRADIO UI
# Start the Gradio interface with our configured agent
GradioUI(agent).launch()
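
# Note: for quick checks without the UI, the agent can also be invoked directly, e.g.
# agent.run("What time is it in Tokyo right now?")  # illustrative query, not part of the app
# The run finishes when the agent calls the final_answer tool.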