from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool
import datetime
import requests
import pytz
import yaml
from tools.final_answer import FinalAnswerTool
from Gradio_UI import GradioUI
from kokoro import KPipeline
import soundfile as sf
import os
import numpy as np
# Below is an example of a tool that does nothing. Amaze us with your creativity!
@tool
def my_custom_tool(arg1: str, arg2: int) -> str:  # it's important to specify the return type
    # Keep this format for the description / args / args description, but feel free to modify the tool
    """A tool that does nothing yet.

    Args:
        arg1: the first argument
        arg2: the second argument
    """
    return "What magic will you build?"
# Initialize the Kokoro pipeline
pipeline = KPipeline(lang_code='a') # 'a' stands for American English
@tool
def text_to_speech_kokoro(text: str, voice: str = 'af_heart', speed: float = 1.0) -> str:
    """Convert text to speech using the Kokoro-82M model.

    Args:
        text: The text to be converted to speech.
        voice: The voice to use for speech synthesis (default is 'af_heart').
        speed: The speed of the speech (default is 1.0).

    Returns:
        An AgentAudio object pointing to the relative URL of the generated audio file,
        or an error message string if synthesis fails.
    """
    try:
        # Generate speech audio; the pipeline yields one audio segment per chunk of text
        generator = pipeline(text, voice=voice, speed=speed, split_pattern=r'\n+')
        audio_segments = []
        for _, _, audio in generator:
            audio_segments.append(audio)
        if not audio_segments:
            raise ValueError("No audio generated.")
        # Concatenate segments into one audio array
        full_audio = np.concatenate(audio_segments)
        sample_rate = 24000  # Kokoro outputs at 24 kHz
        # Ensure the static folder exists and save the file there
        os.makedirs("static", exist_ok=True)
        filename = os.path.join("static", "output.wav")
        sf.write(filename, full_audio, sample_rate)
        # Return an AgentAudio object pointing to the relative URL of the audio file
        from smolagents.agent_types import AgentAudio
        return AgentAudio("/static/output.wav")
    except Exception as e:
        return f"Error generating speech: {str(e)}"
@tool
def search_dad_jokes(term: str) -> str:
    """A tool that searches for dad jokes containing a specific term.

    Args:
        term: The keyword to search for in dad jokes.
    """
    try:
        headers = {
            "Accept": "application/json",
            "User-Agent": "YourAppName (https://yourappurl.com)"
        }
        # Pass the search term via params so it is URL-encoded correctly
        response = requests.get(
            "https://icanhazdadjoke.com/search",
            params={"term": term},
            headers=headers,
        )
        data = response.json()
        if data['results']:
            jokes = [joke['joke'] for joke in data['results']]
            return f"Found {len(jokes)} jokes:\n" + "\n\n".join(jokes)
        else:
            return f"No jokes found for the term '{term}'."
    except Exception as e:
        return f"Error searching for jokes: {str(e)}"
@tool
def get_random_cocktail() -> str:
    """A tool that fetches a random cocktail recipe."""
    try:
        response = requests.get('https://www.thecocktaildb.com/api/json/v1/1/random.php')
        data = response.json()
        if data and data.get('drinks'):
            drink = data['drinks'][0]
            cocktail_name = drink['strDrink']
            # The API exposes up to 15 ingredient slots; keep only the non-empty ones
            ingredients = [drink[f'strIngredient{i}'] for i in range(1, 16) if drink[f'strIngredient{i}']]
            instructions = drink['strInstructions']
            return f"Cocktail: {cocktail_name}\nIngredients: {', '.join(ingredients)}\nInstructions: {instructions}"
        else:
            return "No cocktail found. Please try again."
    except Exception as e:
        return f"Error fetching random cocktail: {str(e)}"
@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """A tool that fetches the current local time in a specified timezone.

    Args:
        timezone: A string representing a valid timezone (e.g., 'America/New_York').
    """
    try:
        # Create timezone object
        tz = pytz.timezone(timezone)
        # Get current time in that timezone
        local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
        return f"The current local time in {timezone} is: {local_time}"
    except Exception as e:
        return f"Error fetching time for timezone '{timezone}': {str(e)}"
final_answer = FinalAnswerTool()
# If the agent does not answer, the model may be overloaded; use another model or the Hugging Face endpoint below, which also serves Qwen2.5-Coder:
# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
model = HfApiModel(
    max_tokens=2096,
    temperature=0.5,
    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',  # it is possible that this model may be overloaded
    custom_role_conversions=None,
)
# Import tool from Hub
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
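# Illustrative only (commented out), assuming the Hub tool accepts a single text prompt:
# image = image_generation_tool("A cozy reading nook, watercolor style")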
with open("prompts.yaml", 'r') as stream:
prompt_templates = yaml.safe_load(stream)
agent = CodeAgent(
    model=model,
    tools=[final_answer, image_generation_tool, get_current_time_in_timezone, get_random_cocktail, search_dad_jokes, text_to_speech_kokoro],  # add your tools here (don't remove final_answer)
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates,
)
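# Illustrative only (commented out): the agent can also be queried headlessly before
# launching the UI; the prompt below is just an example.
# agent.run("What time is it in Europe/Paris?")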
GradioUI(agent).launch()