import datetime
import pytz
import re
import requests
from typing import List
import yaml
from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, tool
from tools.final_answer import FinalAnswerTool
from Gradio_UI import GradioUI


@tool
def get_website_content(url: str) -> str:
    """
    This tool fetches the content of a website given its URL.
    Args:
        url: The URL of the website to fetch
    Returns:
        str: The content of the website
    """
    try:
        # Time out so the agent does not hang indefinitely on unresponsive sites.
        response = requests.get(url, timeout=30)
        response.raise_for_status()
        return response.text
    except requests.RequestException as e:
        return f"Error fetching website content: {str(e)}"

@tool
def extract_website_content_parts(url: str, extraction_pattern: str) -> List[str]:
    """
    This tool extracts the content parts matching the regular expression string `extraction_pattern` from a website given its `url`.
    Args:
        url: The URL of the website from which content parts should be extracted
        extraction_pattern: The regular expression string of the content parts to extract from the website
    Returns:
        List[str]: The deduplicated content parts of the website `url` that match `extraction_pattern`
    """
    try:
        # Time out so the agent does not hang indefinitely on unresponsive sites.
        response = requests.get(url, timeout=30)
        response.raise_for_status()
        matches: List[str] = re.findall(extraction_pattern, response.text)
        # Deduplicate the matches; note that set() does not preserve the original order.
        return list(set(matches))
    except requests.RequestException as e:
        return [f"Error fetching website content: {str(e)}"]

@tool
def get_papers_url_for_date(year: int, month: int, day: int) -> str:
    """A tool that constructs a URL where machine learning papers for a specific date (YYYY-MM-DD) are listed.
    Args:
        year: the year YYYY
        month: the month MM
        day: the day DD
    Returns:
        str: The URL where machine learning papers for the specified date are listed.
    """
    # Zero-pad month and day so the query string matches the YYYY-MM-DD format named in the docstring.
    return f"https://huggingface.co/papers?date={year}-{month:02d}-{day:02d}"

@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """A tool that fetches the current local time in a specified timezone.
    Args:
        timezone: A string representing a valid timezone (e.g., 'America/New_York').
    Returns:
        str: The current local time, formatted as "%Y-%m-%d %H:%M:%S".
    """
    try:
        # Create the timezone object
        tz = pytz.timezone(timezone)
        # Get the current time in that timezone
        local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
        return f"The current local time in {timezone} is: {local_time}"
    except Exception as e:
        return f"Error fetching time for timezone '{timezone}': {str(e)}"

final_answer = FinalAnswerTool()

model = HfApiModel(
    max_tokens=2096,
    temperature=0.5,
    # This dedicated endpoint may be overloaded.
    model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud',
    # If the agent does not answer, the endpoint is overloaded; switch to another model,
    # e.g. the following Hugging Face model id, which also serves Qwen2.5 Coder:
    # model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
    custom_role_conversions=None,
)

search_tool = DuckDuckGoSearchTool()
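
# prompts.yaml is expected to provide the prompt templates for CodeAgent (for example a
# "system_prompt" entry); the exact keys depend on the smolagents version in use.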
with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)

agent = CodeAgent(
    model=model,
    tools=[
        final_answer,
        search_tool,
        extract_website_content_parts,
        get_website_content,
        get_papers_url_for_date,
        get_current_time_in_timezone,
    ],
    max_steps=30,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates,
)

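# For a quick smoke test without the web UI, the agent can also be queried directly; the
# task string below is only an example:
#   agent.run("Which machine learning papers were listed on Hugging Face on 2025-02-07?")
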
GradioUI(agent).launch()