Sirine MILI
committed on
Commit
·
21132c7
1
Parent(s):
22b4d69
[CHANGES]
Browse files- app-withshortcut.py +234 -0
- common/__init__.py +0 -0
- common/mylogger.py +41 -0
- env.example +3 -0
- multiagents.py +137 -0
- myprompts.py +14 -0
- requirements.txt +11 -4
- tools/__init__.py +0 -0
- tools/fetch.py +100 -0
- tools/image.py +40 -0
- tools/stt.py +72 -0
- tools/yttranscript.py +72 -0
- vllm_asopenai_test.py +17 -0
- vllm_test.py +18 -0
- webpage +0 -0
app-withshortcut.py
ADDED
@@ -0,0 +1,234 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import gradio as gr
|
3 |
+
import requests
|
4 |
+
import inspect
|
5 |
+
import pandas as pd
|
6 |
+
from dotenv import load_dotenv
|
7 |
+
|
8 |
+
from myagent import BasicAgent # Import your agent class from myagent.py
|
9 |
+
from multiagents import MultiAgent
|
10 |
+
|
11 |
+
from phoenix.otel import register
|
12 |
+
from openinference.instrumentation.smolagents import SmolagentsInstrumentor
|
13 |
+
|
14 |
+
# use space_host var to determine if running in HF space or locally, if so register local instrumentation
space_host_startup = os.getenv("SPACE_HOST")
if not space_host_startup:
    # Running locally: register a local Phoenix OTEL endpoint and trace smolagents calls.
    register()
    SmolagentsInstrumentor().instrument()

# (Keep Constants as is)
# --- Constants ---
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
load_dotenv()

# Default for the UI's question-count input (caps cost during testing).
max_questions = 20

# known answer, already solved, to avoid computation cost
# Keys are scoring-API task ids; values are the exact answers to resubmit.
known_answers = {
    "f918266a-b3e0-4914-865d-4faa564f1aef": "0",
    "a1e91b78-d3d8-4675-bb8d-62741b4b68a6": "3",
    "2d83110e-a098-4ebb-9987-066c06fa42d0": "right",
    "8e867cd7-cff9-4e6c-867a-ff5ddc2550be": "3",
    "9d191bce-651d-4746-be2d-7ef8ecadb9c2": "extremely",
    # Add more known answers as needed
}
|
36 |
+
|
37 |
+
|
38 |
+
|
39 |
+
def run_and_submit_all(nb_questions: int, profile: gr.OAuthProfile | None):
    """
    Fetches all questions, runs my Agent on them, submits all answers,
    and displays the results.

    Args:
        nb_questions (int): Maximum number of questions to answer; the
            fetched list is truncated to this length (useful for testing).
        profile (gr.OAuthProfile | None): Hugging Face OAuth profile
            injected by Gradio; None when the user is not logged in.

    Returns:
        A (status_message, results_dataframe) tuple; the dataframe is None
        on early failures (no login, agent init error, fetch error).
    """
    # --- Determine HF Space Runtime URL and Repo URL ---
    space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code

    if profile:
        username= f"{profile.username}"
        print(f"User logged in: {username}")
    else:
        print("User not logged in.")
        return "Please Login to Hugging Face with the button.", None

    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    file_url = f"{api_url}/files"
    submit_url = f"{api_url}/submit"

    # 1. Instantiate Agent
    try:
        # agent = BasicAgent()
        agent = MultiAgent()
    except Exception as e:
        print(f"Error instantiating agent: {e}")
        return f"Error initializing agent: {e}", None
    # In the case of an app running as a hugging Face space, this link points toward your codebase ( usefull for others so please keep it public)
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
    print(agent_code)

    # 2. Fetch Questions
    print(f"Fetching questions from: {questions_url}")
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
        if not questions_data:
            print("Fetched questions list is empty.")
            return "Fetched questions list is empty or invalid format.", None
        print(f"Fetched {len(questions_data)} questions.")
    except requests.exceptions.RequestException as e:
        print(f"Error fetching questions: {e}")
        return f"Error fetching questions: {e}", None
    except requests.exceptions.JSONDecodeError as e:
        print(f"Error decoding JSON response from questions endpoint: {e}")
        print(f"Response text: {response.text[:500]}")
        return f"Error decoding server response for questions: {e}", None
    except Exception as e:
        print(f"An unexpected error occurred fetching questions: {e}")
        return f"An unexpected error occurred fetching questions: {e}", None

    # 3. Run your Agent
    results_log = []
    answers_payload = []

    # for testing keep only some questions
    questions_data = questions_data[:nb_questions]

    print(f"Running agent on {len(questions_data)} questions...")
    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
        file_name = item.get("file_name")
        file_question_url = None
        if file_name:
            # The scoring API serves attachments under /files/<task_id>.
            file_question_url = f"{file_url}/{task_id}"
        if not task_id or question_text is None:
            print(f"Skipping item with missing task_id or question: {item}")
            continue
        try:
            agent_question = question_text
            if file_question_url:
                # Tell the agent where to download the attachment from.
                agent_question += f"\n\nFile URL: {file_question_url}"

            # Shortcut: reuse known answers to avoid recomputing solved tasks.
            shortcut = known_answers.get(task_id)
            if shortcut:
                submitted_answer = shortcut
            else:
                submitted_answer = agent(agent_question)
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
        except Exception as e:
            # A failing question must not abort the whole run; record the error.
            print(f"Error running agent on task {task_id}: {e}")
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})

    if not answers_payload:
        print("Agent did not produce any answers to submit.")
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

    # 4. Prepare Submission
    submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
    status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
    print(status_update)

    # 5. Submit
    print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        print("Submission successful.")
        results_df = pd.DataFrame(results_log)
        return final_status, results_df
    except requests.exceptions.HTTPError as e:
        error_detail = f"Server responded with status {e.response.status_code}."
        try:
            error_json = e.response.json()
            error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
        except requests.exceptions.JSONDecodeError:
            error_detail += f" Response: {e.response.text[:500]}"
        status_message = f"Submission Failed: {error_detail}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except requests.exceptions.Timeout:
        status_message = "Submission Failed: The request timed out."
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except requests.exceptions.RequestException as e:
        status_message = f"Submission Failed: Network error - {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except Exception as e:
        status_message = f"An unexpected error occurred during submission: {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
|
176 |
+
|
177 |
+
|
178 |
+
# --- Build Gradio Interface using Blocks ---
with gr.Blocks() as demo:
    gr.Markdown("# Basic Agent Evaluation Runner")
    gr.Markdown(
        """
        **Instructions:**

        1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc ...
        2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
        3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.

        ---
        **Disclaimers:**
        Once clicking on the "submit button, it can take quite some time ( this is the time for the agent to go through all the questions).
        This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance for the delay process of the submit button, a solution could be to cache the answers and submit in a seperate action or even to answer the questions in async.
        """
    )

    gr.LoginButton()

    # How many questions to actually run (keeps cost down while testing).
    nb_questions = gr.Number(value=20)

    run_button = gr.Button("Run Evaluation & Submit All Answers")

    status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
    # Removed max_rows=10 from DataFrame constructor
    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)

    # NOTE: the gr.OAuthProfile parameter of run_and_submit_all is injected
    # automatically by Gradio's OAuth support, so it is not listed in `inputs`.
    run_button.click(
        fn=run_and_submit_all,
        inputs=[nb_questions],
        outputs=[status_output, results_table]
    )
|
211 |
+
|
212 |
+
if __name__ == "__main__":
    print("\n" + "-"*30 + " App Starting " + "-"*30)
    # Check for SPACE_HOST and SPACE_ID at startup for information
    space_host_startup = os.getenv("SPACE_HOST")
    space_id_startup = os.getenv("SPACE_ID") # Get SPACE_ID at startup

    if space_host_startup:
        print(f"✅ SPACE_HOST found: {space_host_startup}")
        print(f"   Runtime URL should be: https://{space_host_startup}.hf.space")
    else:
        print("ℹ️ SPACE_HOST environment variable not found (running locally?).")

    if space_id_startup: # Print repo URLs if SPACE_ID is found
        print(f"✅ SPACE_ID found: {space_id_startup}")
        print(f"   Repo URL: https://huggingface.co/spaces/{space_id_startup}")
        print(f"   Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
    else:
        print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")

    print("-"*(60 + len(" App Starting ")) + "\n")

    print("Launching Gradio Interface for Basic Agent Evaluation...")
    # debug=True surfaces tracebacks in the UI; share=False keeps the app private.
    demo.launch(debug=True, share=False)
|
common/__init__.py
ADDED
File without changes
|
common/mylogger.py
ADDED
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import time
|
2 |
+
|
3 |
+
def save_file_with_timestamp(content: str, file_name: str, extension: str) -> str:
    """
    Save content to a timestamped file under the test/ folder.

    The file is written to ``test/<file_name>_<unix-timestamp>.<extension>``;
    the timestamp suffix makes each saved file unique.

    Args:
        content (str): The content to save.
        file_name (str): The base name of the file (no directory, no extension).
        extension (str): The file extension, with or without a leading dot.

    Returns:
        str: The path of the saved file, or the bare ``file_name`` if saving failed.
    """
    import os  # local import: keeps module-level deps unchanged for mylog-only users

    # Tolerate callers passing ".md" as well as "md" (avoids "name..md" files).
    extension = extension.lstrip(".")
    unicity_suffix = str(int(time.time()))
    file_path = f"test/{file_name}_{unicity_suffix}.{extension}"
    try:
        # Ensure the destination folder exists before writing.
        os.makedirs("test", exist_ok=True)
        # BUG FIX: the original opened `file_name` (in the cwd) instead of the
        # computed `file_path`, so the timestamped path was never used.
        with open(file_path, "w", encoding="utf-8") as f:
            f.write(content)
        # Return the actual saved path (the original returned the base name).
        return file_path
    except Exception as e:
        # Best-effort persistence: never let a save failure break the caller.
        print(f"Error saving content to file: {e}")
        return file_name
24 |
+
|
25 |
+
|
26 |
+
def mylog(agent_name: str, message: str, depth: int = 0) -> None:
    """
    Append a message to the agent call log, indented by nesting depth.

    Args:
        agent_name (str): Name of the agent emitting the message.
        message (str): The message to log.
        depth (int): Nesting depth; each level indents by four spaces.
    """
    prefix = " " * (depth * 4)
    try:
        # Best-effort logging: a failure here must never break the caller.
        with open("logs/agent_calls.log", "a") as log_file:
            log_file.write(f"{prefix}{agent_name}: {message}\n")
    except Exception as e:
        print(f"Error logging agent call: {e}")
|
41 |
+
|
env.example
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
HF_TOKEN=<<your hf token>>
|
2 |
+
SPACE_ID=<<your hf space id>>
|
3 |
+
OPENAI_API_KEY=<<your open ai key>>
|
multiagents.py
ADDED
@@ -0,0 +1,137 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
+
# a multi agent proposal to solve HF agent course final assignment
|
3 |
+
import os
|
4 |
+
import dotenv
|
5 |
+
from smolagents import CodeAgent
|
6 |
+
from smolagents import OpenAIServerModel
|
7 |
+
from tools.fetch import fetch_webpage, search_web
|
8 |
+
from smolagents import PythonInterpreterTool
|
9 |
+
from tools.yttranscript import get_youtube_transcript, get_youtube_title_description
|
10 |
+
from tools.stt import get_text_transcript_from_audio_file
|
11 |
+
from tools.image import analyze_image
|
12 |
+
from common.mylogger import mylog
|
13 |
+
import myprompts
|
14 |
+
|
15 |
+
dotenv.load_dotenv()
|
16 |
+
|
17 |
+
# Gemini accessed through Google's OpenAI-compatible endpoint.
gemini_model = OpenAIServerModel(
    model_id="gemini-2.0-flash",
    api_key=os.environ["GEMINI_API_KEY"],
    # Google Gemini OpenAI-compatible API base URL
    api_base="https://generativelanguage.googleapis.com/v1beta/openai/",
)

# Local vLLM server (fully-offline option); host/port are LAN-specific.
vllm_model = OpenAIServerModel(
    model_id="Qwen/Qwen2.5-1.5B-Instruct",
    api_base="http://192.168.1.39:18000/v1",
    api_key="token-abc123",
)

# Cheap OpenAI model used by the worker agents.
openai_41nano_model = OpenAIServerModel(
    model_id="gpt-4.1-nano",
    api_base="https://api.openai.com/v1",
    api_key=os.environ["OPENAI_API_KEY"],
)

# Slightly stronger OpenAI model used by the manager agent.
openai_41mini_model = OpenAIServerModel(
    model_id="gpt-4.1-mini",
    api_base="https://api.openai.com/v1",
    api_key=os.environ["OPENAI_API_KEY"],
)
|
41 |
+
|
42 |
+
|
43 |
+
def check_final_answer(final_answer, agent_memory) -> bool:
    """
    Sanity-check a candidate final answer before the manager agent returns it.

    Heuristic: expected answers are terse (a number, a few words, or a short
    comma-separated list), so anything longer than 200 characters is rejected.

    Args:
        final_answer: The candidate answer produced by the agent.
        agent_memory: The agent's memory (accepted for the callback
            signature; not used by this check).

    Returns:
        bool: True when the answer looks acceptable, False otherwise.
    """
    mylog("check_final_answer", final_answer)
    # Overly long answers cannot match the required output format.
    return len(str(final_answer)) <= 200
|
54 |
+
|
55 |
+
|
56 |
+
# Worker agent: web search plus page fetching.
web_agent = CodeAgent(
    model=openai_41nano_model,
    tools=[
        search_web,
        fetch_webpage,
    ],
    name="web_agent",
    description="Use search engine to find webpages related to a subject and get the page content",
    additional_authorized_imports=["pandas", "numpy","bs4"],
    verbosity_level=1,
    max_steps=7,
)

# Worker agent: media understanding (YouTube transcripts/metadata, audio, images).
audiovideo_agent = CodeAgent(
    model=openai_41nano_model,
    tools=[
        get_youtube_transcript,
        get_youtube_title_description,
        get_text_transcript_from_audio_file,
        analyze_image
    ],
    name="audiovideo_agent",
    description="Extracts information from image, video or audio files from the web",
    additional_authorized_imports=["pandas", "numpy","bs4", "requests"],
    verbosity_level=1,
    max_steps=7,
)



# Orchestrator: delegates to the two workers, re-plans every 5 steps, and
# validates candidate answers with check_final_answer before finishing.
manager_agent = CodeAgent(
    model=openai_41mini_model,
    tools=[ PythonInterpreterTool()],
    managed_agents=[web_agent, audiovideo_agent],
    additional_authorized_imports=["pandas", "numpy","bs4"],
    planning_interval=5,
    verbosity_level=2,
    final_answer_checks=[check_final_answer],
    max_steps=15,
    name="manager_agent",
    description="A manager agent that coordinates the work of other agents to answer questions.",
)
|
98 |
+
|
99 |
+
class MultiAgent:
    # Thin callable wrapper around the module-level manager_agent, exposing the
    # single-question interface (question in, answer string out) that app.py expects.
    def __init__(self):
        print("BasicAgent initialized.")

    def __call__(self, question: str) -> str:
        # Log every incoming question for later inspection.
        mylog(self.__class__.__name__, question)

        try:
            prefix = """You are the top agent of a multi-agent system that can answer questions by coordinating the work of other agents.
You will receive a question and you will decide which agent to use to answer it.
You can use the web_agent to search the web for information and for fetching the content of a web page, or the audiovideo_agent to extract information from video or audio files.
You can also use your own knowledge to answer the question.
You need to respect the output format that is given to you.
Finding the correct answer to the question need reasoning and plannig, read the question carrefully, think step by step and do not skip any steps.
"""

            # Wrap the raw question with the system prefix and the required output format.
            question = prefix + "\nTHE QUESTION:\n" + question + '\n' + myprompts.output_format

            fixed_answer = ""

            fixed_answer = manager_agent.run(question)

            return fixed_answer
        except Exception as e:
            # Never raise into the Gradio callback: return the error text as the answer.
            error = f"An error occurred while processing the question: {e}"
            print(error)
            return error
|
126 |
+
|
127 |
+
|
128 |
+
if __name__ == "__main__":
    # Example usage: run one known benchmark question end-to-end.

    question = """
    What was the actual enrollment of the Malko competition in 2023?
    """
    agent = MultiAgent()
    answer = agent(question)
    print(f"Answer: {answer}")
|
137 |
+
|
myprompts.py
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Prompt texts shared by the agents.
# BUG FIX: corrected "Your task it to" -> "Your task is to" in both prompts;
# typos in prompts degrade instruction-following. output_format is kept
# verbatim since it mirrors the benchmark's required answer format.
review_prompt = """You are a reviewer agent. You will be given a question.
Your task is to assert if a LLM agent with access to web content can answer the question or not, or if a coding agent and more tools is needed to answer the question.
If the question is too complex for a LLM agent, you should return "code" as the answer else you should return "model".
"""

model_prompt = """You are a LLM agent. You will be given a question.
Your task is to answer the question using the tools you have access to.
take time to analyse the steps to answer the question.
if a filename is given in the question you can infer the url
"""

output_format = """OUTPUT FORMAT should be a number OR as few words as possible OR a comma separated list of numbers and/or strings.
If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise, use digits only.
If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise. If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string."""
|
requirements.txt
CHANGED
@@ -1,7 +1,14 @@
|
|
1 |
gradio
|
|
|
2 |
requests
|
3 |
-
|
4 |
-
smolagents
|
5 |
duckduckgo-search
|
6 |
-
openai
|
7 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
gradio
|
2 |
+
gradio[oauth]
|
3 |
requests
|
4 |
+
python-dotenv
|
5 |
+
smolagents
|
6 |
duckduckgo-search
|
7 |
+
smolagents[openai]
|
8 |
+
markdownify
|
9 |
+
beautifulsoup4
|
10 |
+
transformers
|
11 |
+
smolagents[transformers]
|
12 |
+
smolagents[audio]
|
13 |
+
smolagents[telemetry,toolkit]
|
14 |
+
youtube_transcript_api
|
tools/__init__.py
ADDED
File without changes
|
tools/fetch.py
ADDED
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# this is a smolagents tool to fetch html content from a url
|
2 |
+
from smolagents import tool
|
3 |
+
import requests
|
4 |
+
from markdownify import markdownify as md
|
5 |
+
from bs4 import BeautifulSoup
|
6 |
+
from common.mylogger import save_file_with_timestamp, mylog
|
7 |
+
|
8 |
+
@tool
def fetch_webpage(url: str, convert_to_markdown: bool = True) -> str:
    """
    Fetches the HTML content of a given URL.
    if markdown conversion is enabled, it will remove script and style and return the text content as markdown else return raw unfiltered HTML
    Args:
        url (str): The URL to fetch.
        convert_to_markdown (bool): If True, convert the HTML content to Markdown format. else return the raw HTML.
    Returns:
        str: The HTML content of the URL.
    """
    content = None
    response = requests.get(url, timeout=30)
    # BUG FIX: fail fast on HTTP errors instead of silently converting an error page.
    response.raise_for_status()
    if (convert_to_markdown):
        soup = BeautifulSoup(response.text, "html.parser")
        # remove script and style tags
        for script in soup(["script", "style"]):
            script.extract()

        # for wikipedia only keep the main content
        if "wikipedia.org" in url:
            main_content = soup.find("main",{"id":"content"})
            if main_content:
                content = md(str(main_content),strip=['script', 'style'], heading_style="ATX").strip()
        if content is None:
            # BUG FIX: the original left content=None for wikipedia pages without
            # a <main id="content"> element; fall back to converting the full page.
            content = md(response.text,strip=['script', 'style'], heading_style="ATX").strip()
    else:
        content = response.text

    # BUG FIX: pass the extension without a leading dot — save_file_with_timestamp
    # inserts the dot itself (".md" produced files named "webpage_<ts>..md").
    save_file_with_timestamp(content, "webpage", "md" if convert_to_markdown else "html")

    return content
|
40 |
+
|
41 |
+
@tool
def search_web(query: str, num_results: int = 5) -> list:
    """
    Perform a web search using local SearXNG instance.
    Args:
        query (str): The search query.
        num_results (int): The number of results to return.
    Returns:
        list: A list of search results sorted by score with {url, title, content, score} for each result.
    """
    # local metaserach engine searxng, run on localhost:8888
    searxng_url = "http://localhost:8888/search"
    params = {"q": query, "format": 'json'}
    # BUG FIX: added a timeout — without one an unresponsive search backend
    # would hang the agent indefinitely.
    response = requests.get(searxng_url, params=params, timeout=30)
    if response.status_code == 200:
        ret = response.json()
        # keep only the response 'results' array, truncated to num_results
        results = ret.get("results", [])[:num_results]
        # for each result keep only the url, title, content and score
        results = [
            {
                "url": result.get("url"),
                "title": result.get("title"),
                "content": result.get("content"),
                "score": result.get("score"),
            }
            for result in results
        ]

        return results

    else:
        # Best-effort tool: report the failure and return no results.
        print(f"Error: {response.status_code}")
        return []
|
78 |
+
|
79 |
+
if __name__ == "__main__":

    try:
        # Test the search function (requires a local SearXNG instance).
        query = "What is the capital of France?"
        results = search_web(query,3)
        print(results)
    except Exception as e:
        print(f"An error occurred: {e}")

    try:
        # Test the fetch function against a Wikipedia page.
        video_id = "L1vXCYZAYYM" # Replace with your YouTube video ID
        video_url = "https://www.youtube.com/watch?v=" + video_id
        url = "https://en.wikipedia.org/wiki/Malko_Competition"
        # page_content = fetch_webpage(video_url)
        page_content = fetch_webpage(url, convert_to_markdown=True)
        # encode to avoid console codec errors on non-ASCII page content
        print(page_content.encode("utf-8"))
    except Exception as e:
        print(f"An error occurred: {e}")
|
99 |
+
|
100 |
+
|
tools/image.py
ADDED
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# image analyzer using open ai model
|
2 |
+
|
3 |
+
from smolagents import tool
|
4 |
+
from openai import OpenAI
|
5 |
+
import dotenv
|
6 |
+
dotenv.load_dotenv()
|
7 |
+
|
8 |
+
@tool
def analyze_image(question: str, image_url: str) -> str:
    """
    Analyze an image using OpenAI's API.

    Args:
        question (str): The question to ask about the image. eg. "What is in this image?"
        image_url (str): The URL of the image to analyze.

    Returns:
        str: The model's textual answer about the image.
    """
    client = OpenAI()

    response = client.responses.create(
        model="gpt-4o-mini",
        input=[
            {
                "role": "user",
                "content": [
                    { "type": "input_text", "text": f"{question}" },
                    {
                        "type": "input_image",
                        "image_url": f"{image_url}",
                    }
                ]
            }
        ]
    )

    # BUG FIX: return the answer text, not the raw Response object — the
    # declared return type is str and the calling agents treat it as text.
    return response.output_text
|
35 |
+
|
36 |
+
if __name__ == "__main__":
    # Smoke test against a sample image file served by the scoring API.
    question = "What is the main subject of this image?"
    image_url = "https://agents-course-unit4-scoring.hf.space/files/cca530fc-4052-43b2-b130-b30968d8aa44"
    answer = analyze_image(question, image_url)
    print(f"Answer: {answer}")
|
tools/stt.py
ADDED
@@ -0,0 +1,72 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import io
|
2 |
+
import os
|
3 |
+
import shutil
|
4 |
+
import subprocess
|
5 |
+
import requests
|
6 |
+
import uuid
|
7 |
+
from smolagents import tool
|
8 |
+
import dotenv
|
9 |
+
dotenv.load_dotenv()
|
10 |
+
|
11 |
+
@tool
def get_text_transcript_from_audio_file(file_url: str, language: str = "en-US") -> str:
    """
    Convert speech to text using local whisper model.
    This function downloads an audio file from a given URL, converts it to WAV format if necessary,
    then use whisper model to transcribe the audio to text.

    Args:
        file_url (str): The URL of the audio file to transcribe.
        language (str): The language code for the transcription. Default is "en-US".

    Returns:
        str: The transcribed text.
    """
    # NOTE(review): `language` is not passed to the whisper invocation below —
    # confirm whether it was meant to become a --language flag.

    # Random base name so concurrent calls never collide on disk.
    file_name = uuid.uuid4().hex +".mp3"

    dest_folder = os.getenv("STT_FOLDER")
    if not dest_folder:
        dest_folder = '.'
    # NOTE(review): "\\tmp" hard-codes a Windows path separator; on Linux this
    # yields a folder literally named '.\tmp' — confirm the intended platform.
    file_path = os.path.join(dest_folder + "\\tmp", file_name)
    # 1. download the file from url (in pure python without wget or curl)
    # NOTE(review): this existence check tests file_name (cwd) while the file is
    # written to file_path; with a fresh uuid per call the branch is always
    # taken, but the check looks like it was meant to test file_path — verify.
    if not os.path.exists(file_name):
        response = requests.get(file_url)
        if response.status_code == 200:
            with open(file_path, "wb") as f:
                f.write(response.content)
        else:
            raise Exception(f"Error downloading file: {response.status_code}")

    # 2. if it is a mp3 convert to wav with ffmpeg exec
    if file_name.endswith(".mp3"):
        # 16 kHz mono PCM is the input format expected by the whisper model.
        cmd = f"ffmpeg -i {file_path} -ac 1 -ar 16000 -c:a pcm_s16le {file_path[:-4]}.wav"
        cmd_as_list = cmd.split()
        subprocess.run(cmd_as_list, cwd=dest_folder, check=True)
        file_path = file_path[:-4] + ".wav"
        file_name = file_name[:-4] + ".wav"

    # 3. copy file to data folder
    shutil.copy2(file_path, os.path.join(dest_folder, "testdata/"))


    # 4. call docker run command
    # The containerized go-whisper reads the WAV from the mounted testdata
    # folder and writes a .txt transcript next to it.
    docker_command = f"""
    docker run
    -v {dest_folder}/models:/app/models
    -v {dest_folder}/testdata:/app/testdata
    ghcr.io/appleboy/go-whisper:latest
    --model /app/models/ggml-small.bin
    --audio-path /app/testdata/{file_name}
    """

    subprocess.run(docker_command.split(), cwd=dest_folder, check=True)
    # 5. cat the output file an return it
    output_filepath = os.path.join(dest_folder, "testdata", f"{file_name[:-4]}.txt")
    with open(output_filepath, "r") as f:
        text = f.read()
    return text
|
69 |
+
|
70 |
+
if __name__ == "__main__":
    # Smoke test against a sample audio file served by the scoring API.
    transcript = get_text_transcript_from_audio_file("https://agents-course-unit4-scoring.hf.space/files/99c9cc74-fdc8-46c6-8f8d-3ce2d3bfeea3", )
    print(transcript)
|
tools/yttranscript.py
ADDED
@@ -0,0 +1,72 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
+
from smolagents import tool
|
3 |
+
from youtube_transcript_api import YouTubeTranscriptApi
|
4 |
+
from bs4 import BeautifulSoup
|
5 |
+
from tools.fetch import fetch_webpage
|
6 |
+
|
7 |
+
@tool
def get_youtube_transcript(video_id: str) -> str:
    """
    Fetches the transcript of a YouTube video given its video ID.
    Args:
        video_id (str): The ID of the YouTube video. Pass in the video ID, NOT the video URL. For a video with the URL https://www.youtube.com/watch?v=12345 the ID is 12345.
    Returns:
        str: The transcript of the YouTube video. as a single string with each line separated by a newline character.
    """
    api = YouTubeTranscriptApi()
    # to_raw_data() yields dicts like {'text': ..., 'start': ..., 'duration': ...};
    # only the text of each snippet is kept, one per line.
    snippets = api.fetch(video_id).to_raw_data()
    return "\n".join(snippet['text'] for snippet in snippets)
|
24 |
+
|
25 |
+
|
26 |
+
@tool
def get_youtube_title_description(video_url: str) -> str:
    """
    Fetches the title and description of a YouTube video given its video ID.
    Args:
        video_url (str): The url of the YouTube video.
    Returns:
        str: The title and description of the YouTube video.
    """
    # Parse the raw page HTML and read the <meta name="..."> tags.
    html = fetch_webpage(video_url, convert_to_markdown=False)
    soup = BeautifulSoup(html, "html.parser")

    def _meta_content(name, fallback):
        # Return the meta tag's content attribute, or the fallback when absent.
        tag = soup.find("meta", {"name": name})
        return tag["content"] if tag is not None else fallback

    title = _meta_content("title", "No title found")
    description = _meta_content("description", "No description found")

    return f"Title: {title}\nDescription: {description}"
|
53 |
+
|
54 |
+
|
55 |
+
if __name__ == "__main__":
    from dotenv import load_dotenv
    # BUG FIX: the original referenced `load_dotenv` without calling it,
    # so the .env file was never loaded.
    load_dotenv()
    # Test the function
    video_id = "1htKBjuUWec" # Replace with your YouTube video ID
    video_url = "https://www.youtube.com/watch?v=" + video_id
    # Get the title and description
    try:
        title_description = get_youtube_title_description(video_url)
        print(title_description)
    except Exception as e:
        print(f"Error fetching title and description: {e}")

    try:
        transcript = get_youtube_transcript(video_id)
        # BUG FIX: print moved inside the try — it used to run after the
        # except block and raised NameError whenever the fetch failed.
        print(transcript)
    except Exception as e:
        print(f"Error fetching transcript: {e}")
|
vllm_asopenai_test.py
ADDED
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
from smolagents import OpenAIServerModel, CodeAgent

# test file in case I need to run entirely locally

# Local vLLM server exposed through the OpenAI-compatible API.
model = OpenAIServerModel(
    model_id="Qwen/Qwen2.5-1.5B-Instruct",
    api_base="http://192.168.1.39:18000/v1",
    api_key="token-abc123",
)

# Bare CodeAgent with no tools: checks model connectivity only.
myagent = CodeAgent(
    model=model,
    tools=[])

result = myagent.run("Hello who are you?") # Replace with your question
print(result)
|
vllm_test.py
ADDED
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from openai import OpenAI

# test file in case I need to run entirely locally
# this tests compatibility of the local vLLM server with the OpenAI client

client = OpenAI(
    base_url="http://192.168.1.39:18000/v1",
    api_key="token-abc123",
)

# Minimal one-turn chat completion against the locally served model.
completion = client.chat.completions.create(
    model="Qwen/Qwen2.5-1.5B-Instruct",
    messages=[
        {"role": "user", "content": "Hello!"}
    ]
)

print(completion.choices[0].message)
|
webpage
ADDED
The diff for this file is too large to render.
See raw diff
|
|