File size: 1,436 Bytes
c19d193 80b16e1 8fe992b 9b5b26a 80b16e1 8c01ffb 6aae614 80b16e1 e121372 80b16e1 13d500a 8c01ffb 80b16e1 8c01ffb 9b5b26a 8c01ffb 80b16e1 861422e 80b16e1 8c01ffb 8fe992b 80b16e1 8c01ffb 80b16e1 8fe992b 9b5b26a 80b16e1 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 |
import yaml
from smolagents import (
CodeAgent,
DuckDuckGoSearchTool,
HfApiModel,
OpenAIServerModel,
load_tool,
tool,
)
from Gradio_UI import GradioUI
from todo_agents import (
add_task,
get_current_time_in_timezone,
get_todays_tasks,
update_task_status,
)
from tools.final_answer import FinalAnswerTool
final_answer = FinalAnswerTool()

# Remote LLM served from a dedicated Hugging Face inference endpoint.
model = HfApiModel(
    max_tokens=2096,
    temperature=0.5,
    model_id="https://wxknx1kg971u7k1n.us-east-1.aws.endpoints.huggingface.cloud",  # it is possible that this model may be overloaded
    custom_role_conversions=None,
)

# # Local LLM (alternative: an OpenAI-compatible server such as LM Studio)
# model = OpenAIServerModel(
#     model_id="Qwen/Qwen2.5-Coder-14B-Instruct-GGUF",
#     api_base="http://100.81.11.125:1234/v1",
#     api_key="lm-studio",
# )

# Import tool from Hub.
# SECURITY: trust_remote_code=True downloads and executes code from the Hub —
# keep only for tools you trust.
# NOTE(review): image_generation_tool is loaded but never added to the agent's
# tools list below; add it there or drop this load if it is not needed.
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)

# Load the prompt templates the agent uses for its system/planning prompts.
# Explicit UTF-8: the platform-default encoding breaks on non-ASCII prompts
# (e.g. on Windows, where the default is a legacy code page).
with open("prompts.yaml", "r", encoding="utf-8") as stream:
    prompt_templates = yaml.safe_load(stream)

# Code-writing agent wired up with the final-answer tool plus the todo tools.
agent = CodeAgent(
    model=model,
    tools=[
        final_answer,
        get_current_time_in_timezone,
        get_todays_tasks,
        add_task,
        update_task_status,
    ],  ## add your tools here (don't remove final answer)
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates,
)

# Launch the Gradio web UI for interacting with the agent.
GradioUI(agent).launch()
|