from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool
import yaml
from tools.final_answer import FinalAnswerTool
from tools.visit_webpage import VisitWebpageTool
from tools.linkedin_post_composer import LinkedInPostPromptComposerTool

from Gradio_UI import GradioUI


final_answer = FinalAnswerTool()
visit_web_page = VisitWebpageTool()
linkedin_post_composer = LinkedInPostPromptComposerTool()
search = DuckDuckGoSearchTool()
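
# Illustrative sketch (not part of the original file): additional tools can be defined
# with the @tool decorator and appended to the agent's tools list below. This timezone
# example is an assumption modelled on the course template, using only the standard library.
@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """A tool that returns the current local time in a specified IANA timezone.
    Args:
        timezone: A string naming a valid IANA timezone (e.g., 'America/New_York').
    """
    from datetime import datetime
    from zoneinfo import ZoneInfo
    try:
        return datetime.now(ZoneInfo(timezone)).strftime("%Y-%m-%d %H:%M:%S")
    except Exception as e:
        return f"Error fetching time for timezone '{timezone}': {e}"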

# If the agent does not answer, the model is overloaded. Use another model or the following
# Hugging Face endpoint, which also serves Qwen2.5 Coder:
# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'

model = HfApiModel(
    max_tokens=2096,
    temperature=0.5,
    model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud',  # this endpoint may be overloaded
    custom_role_conversions=None,
)
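
# Optional fallback (a sketch, not verified against current endpoint status): if the
# endpoint above is unresponsive, a Hub-hosted model id can be passed instead, e.g.
# model = HfApiModel(
#     max_tokens=2096,
#     temperature=0.5,
#     model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
#     custom_role_conversions=None,
# )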


# Load the text-to-image generation tool from the Hub
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)

with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)
    
agent = CodeAgent(
    model=model,
    tools=[linkedin_post_composer, visit_web_page, search, image_generation_tool, final_answer],  # add your tools here (don't remove final_answer)
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates
)


GradioUI(agent).launch()