from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool
import datetime
import requests
import pytz
import yaml
import os
import sys
import subprocess  # Needed by ShellCommandTool
import io
import json
from huggingface_hub import HfApi
from tools.final_answer import FinalAnswerTool
from tools.visit_webpage import VisitWebpageTool
from tools.web_search import DuckDuckGoSearchTool  # Overrides the smolagents import above
from Gradio_UI import GradioUI
from smolagents.models import OpenAIServerModel
from tools.create_file_tool import CreateFileTool
from tools.modify_file_tool import ModifyFileTool

# Below is an example of a tool that does nothing. Amaze us with your creativity!
@tool
def my_custom_tool(arg1: str, arg2: int) -> str:  # it's important to specify the return type
    # Keep this format for the description / args / args description, but feel free to modify the tool
    """A tool that does nothing yet.
    Args:
        arg1: the first argument
        arg2: the second argument
    """
    return "What magic will you build?"

# A tool that returns the current date and time.
@tool
def get_current_realtime() -> str:  # it's important to specify the return type
    """A tool that gets the current date and time."""
    return str(datetime.datetime.now())

@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """A tool that fetches the current local time in a specified timezone.
    Args:
        timezone: A string representing a valid timezone (e.g., 'America/New_York').
    """
    try:
        # Create timezone object
        tz = pytz.timezone(timezone)
        # Get current time in that timezone
        local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
        return f"The current local time in {timezone} is: {local_time}"
    except Exception as e:
        return f"Error fetching time for timezone '{timezone}': {str(e)}"
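
# Note: the example tools above (my_custom_tool, get_current_realtime,
# get_current_time_in_timezone) are defined for illustration but are not registered with
# the CodeAgent below; add them to its tools list if the agent should be able to call them.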


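# FinalAnswerTool provides the final_answer tool the agent calls to return its result to the user.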
final_answer = FinalAnswerTool()

# If the agent does not answer, the model is overloaded; use another model or the
# following Hugging Face endpoint, which also serves Qwen2.5 Coder:
# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'

# model = HfApiModel(
#     model_id="http://192.168.1.141:1234/v1",
#     max_new_tokens=2096,
#     temperature=0.5
# )
# Model configuration: connect to the LLM hosted locally via LM Studio
model = OpenAIServerModel(
    api_base="http://192.168.1.141:1234/v1",
    model_id="Qwen/Qwen2.5-Coder-14B-Instruct-GGUF",  # Arbitrary name for the local model
    api_key="sk-dummy-key",  # Dummy key for LM Studio
    # max_tokens=2096,
)

# Import tool from Hub
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
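# Note: image_generation_tool is loaded here but is not added to the agent's tool list below.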

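# prompts.yaml supplies the prompt templates (system prompt, etc.) passed to the CodeAgent below.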
with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)
    
# Attempted workaround for ShellCommandTool
try:
    from tools.shell_tool import ShellCommandTool
    shell_tool = ShellCommandTool()
except Exception as e:
    print(f"Error loading ShellCommandTool: {e}")
    # Fall back to None; a simplified version of the tool could be created here if needed
    shell_tool = None
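
# A minimal sketch of such a simplified fallback (hypothetical; not wired in by default).
# It assumes a plain subprocess call is acceptable; the real ShellCommandTool interface may differ.
@tool
def run_shell_command(command: str) -> str:
    """Runs a shell command and returns its combined stdout and stderr.
    Args:
        command: The shell command to execute.
    """
    result = subprocess.run(command, shell=True, capture_output=True, text=True, timeout=60)
    return result.stdout + result.stderr
# To use it as the fallback, set shell_tool = run_shell_command in the except branch above.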

agent = CodeAgent(
    model=model,
    tools=[final_answer, DuckDuckGoSearchTool(), VisitWebpageTool(), CreateFileTool(), ModifyFileTool()],
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates
)

# Add ShellCommandTool conditionally
if shell_tool is not None:
    agent.tools['shell_command'] = shell_tool

# Serialize the agent manually, without to_dict(), to avoid validation errors
agent_data = {
    "name": agent.name,
    "description": agent.description,
    "model": agent.model.to_dict() if hasattr(agent.model, "to_dict") else str(agent.model),
    "tools": [tool.__class__.__name__ for tool in agent.tools.values()],
    "max_steps": agent.max_steps,
    "grammar": agent.grammar,
    "planning_interval": agent.planning_interval,
}

# # Save the agent in a custom JSON format
# with open("agent.json", "w", encoding="utf-8") as f:
#     json.dump(agent_data, f, ensure_ascii=False, indent=2)

# # push_to_hub has trouble with emojis; use the push_to_hf.py script instead
# print("Agent saved to agent.json. Use push_to_hf.py to push it to Hugging Face.")

# Use the Hugging Face API directly with UTF-8 encoding
# try:
#     api = HfApi()
#     api.upload_file(
#         path_or_fileobj="agent.json",
#         path_in_repo="agent.json",
#         repo_id="KebabLover/SmolCoderAgent_0_1",
#         repo_type="space",
#         commit_message="Agent update"
#     )
#     print("Agent successfully pushed to Hugging Face!")
# except Exception as e:
#     print(f"Error pushing to Hugging Face: {e}")

GradioUI(agent).launch()