clean wiki, stt, model upgrade
agent.py
CHANGED
@@ -1,7 +1,7 @@
 from dotenv import load_dotenv
 from smolagents import CodeAgent
 from smolagents import OpenAIServerModel
-from tool import fetch_webpage, read_file_tool, get_youtube_transcript
+from tool import fetch_webpage, read_file_tool, get_youtube_transcript, transcribe_audio
 
 from smolagents import VisitWebpageTool, WikipediaSearchTool, PythonInterpreterTool, DuckDuckGoSearchTool, WebSearchTool, SpeechToTextTool
 
@@ -10,15 +10,15 @@ from prompt import gaia_prompt
 load_dotenv()
 
 openai_nano_model = OpenAIServerModel(
-    model_id="gpt-4.1-nano-2025-04-14",
+    # model_id="gpt-4.1-nano-2025-04-14",
+    model_id="gpt-4.1-mini-2025-04-14",
     # model_id="o3-mini-2025-01-31",
 )
 
 gaia_agent = CodeAgent(
     model=openai_nano_model,
-    tools=[fetch_webpage, DuckDuckGoSearchTool(), PythonInterpreterTool(), read_file_tool, get_youtube_transcript], # WikipediaSearchTool(), VisitWebpageTool(max_output_length=60000)
+    tools=[fetch_webpage, DuckDuckGoSearchTool(), PythonInterpreterTool(), read_file_tool, get_youtube_transcript, transcribe_audio], # WikipediaSearchTool(), VisitWebpageTool(max_output_length=60000)
     max_steps=7,
-    # verbosity_level=2,
     additional_authorized_imports=["requests", "bs4", "pandas", "numpy", "markdownify"]
 )
 
@@ -34,6 +34,7 @@ class GAIA_Agent:
             full_context = self.system_prompt + "\nTHE QUESTION:\n" + question
 
             final_answer = self.agent.run(full_context)
+            print(f"final answer returned by agent ----> {final_answer}")
             return final_answer
         except Exception as e:
             error = f"An error occurred while processing the question: {e}"
@@ -45,25 +46,10 @@ class GAIA_Agent:
 # clean answer function
 
 if __name__ == "__main__":
-
-
-
-    # f"""
-    # You are a general AI assistant. I will ask you a question. You can answer with the following template:[YOUR FINAL ANSWER]. YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise. If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise. If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string. Remember: GAIA requires exact answer matching. Just provide the factual answer.
+    answer = gaia_agent.run(
+        """
+        You are a general AI assistant. I will ask you a question. You can answer with the following template:[YOUR FINAL ANSWER]. YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise. If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise. If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string. Remember: GAIA requires exact answer matching. Just provide the factual answer.
 
-
-
-
-
-    # print(gaia_prompt)
-    # answer = gaia_agent.run("""
-    # You are a general AI assistant. I will ask you a question. Report your thoughts, and finish your answer with the following template: FINAL ANSWER: [YOUR FINAL ANSWER]. YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise. If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise. If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string.
-    # You can search for results and then visit a webpage to get more information. Break down the problem into smaller sub-problems and solve them one by one.
-    # Think like a human.
-
-    # What is the final numeric output from the attached Python code?
-
-    # ----
-
-    # """)
-    # print(f"this is the final answer the gaia agent gave ---> {answer}")
+        How many studio albums were published by Mercedes Sosa between 2000 and 2009 (included)? You can use the latest 2022 version of english wikipedia."""
+    )
+    pass
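
Not part of the commit, just a minimal sketch: the same OpenAIServerModel setup with the checkpoint id read from the environment, so switching between the nano and mini models would not require editing agent.py. OPENAI_MODEL_ID is a hypothetical variable name, not something this Space defines.

```python
# Sketch only: OPENAI_MODEL_ID is a hypothetical variable, assumed to live in the same
# .env file that load_dotenv() already reads; falls back to the id used in the commit.
import os

from dotenv import load_dotenv
from smolagents import OpenAIServerModel

load_dotenv()
openai_nano_model = OpenAIServerModel(
    model_id=os.getenv("OPENAI_MODEL_ID", "gpt-4.1-mini-2025-04-14"),
)
```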
app.py
CHANGED
@@ -77,7 +77,7 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
     results_log = []
     answers_payload = []
     print(f"Running agent on {len(questions_data)} questions...")
-    to_answer = [1, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 15, 16, 17,
+    to_answer = [1, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 19, 20]
     for index, item in enumerate(questions_data):
 
         if index + 1 in to_answer:
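
The to_answer list above selects questions by their 1-based position in questions_data. A standalone sketch of that filter with placeholder data:

```python
# Standalone sketch of the index filter; questions_data here is placeholder data.
questions_data = [{"task_id": f"task-{i}", "question": f"question {i}"} for i in range(1, 21)]
to_answer = [1, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 19, 20]

for index, item in enumerate(questions_data):
    if index + 1 in to_answer:  # to_answer holds 1-based positions
        print(f"would run agent on {item['task_id']}")
```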
requirements.txt
CHANGED
@@ -9,4 +9,6 @@ smolagents[openai]
 youtube-transcript-api
 beautifulsoup4
 markdownify
-duckduckgo-search
+duckduckgo-search
+openpyxl
+wikipedia-api
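
openpyxl and wikipedia-api are the new entries; openpyxl is presumably what lets pandas open .xlsx attachments for the agent. A quick sanity-check sketch, with the file name as a placeholder:

```python
# Sanity-check sketch: pandas reads .xlsx files through openpyxl.
# "attachment.xlsx" is a placeholder, not a file in this Space.
import pandas as pd

df = pd.read_excel("attachment.xlsx")
print(df.head())
```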
tool.py
CHANGED
@@ -1,5 +1,7 @@
 from smolagents import Tool, tool
 from youtube_transcript_api import YouTubeTranscriptApi
+from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline
+import torch
 
 @tool
 def fetch_webpage(url: str, convert_to_markdown: bool = True) -> str:
@@ -31,6 +33,46 @@ def fetch_webpage(url: str, convert_to_markdown: bool = True) -> str:
 
     # for wikipedia only keep the main content
     if "wikipedia.org" in url:
+
+        elements_to_remove = [
+            # Navigation and reference elements
+            {'class': 'navbox'},
+            {'class': 'navbox-group'},
+            {'class': 'reflist'},
+            {'class': 'navigation-box'},
+            {'class': 'sister-project'},
+            {'class': 'metadata'},
+            {'class': 'interlanguage-link'},
+            {'class': 'catlinks'},
+            {'id': 'References'},
+            {'id': 'External_links'},
+            {'id': 'Further_reading'},
+            {'id': 'See_also'},
+            {'id': 'Notes'},
+        ]
+
+        for selector in elements_to_remove:
+            elements = soup.find_all(attrs=selector)
+            for element in elements:
+                # For ID-based elements, remove the parent section
+                if 'id' in selector:
+                    parent = element.parent
+                    if parent and parent.name in ['h2', 'h3', 'h4']:
+                        # Remove heading and all content until next heading
+                        current = parent
+                        while current and current.next_sibling:
+                            next_elem = current.next_sibling
+                            if (hasattr(next_elem, 'name') and
+                                    next_elem.name in ['h2', 'h3', 'h4']):
+                                break
+                            if hasattr(next_elem, 'decompose'):
+                                next_elem.decompose()
+                            else:
+                                current = next_elem
+                        parent.decompose()
+                else:
+                    element.decompose()
+
         main_content = soup.find("main",{"id":"content"})
         if main_content:
             content = md(str(main_content),strip=['script', 'style'], heading_style="ATX").strip()
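
A self-contained sketch of the cleanup pattern added above: match Wikipedia chrome by class or id with BeautifulSoup and drop it with decompose() before converting the page to markdown. The HTML snippet is invented for illustration.

```python
# Illustration of the class/id-based removal; the HTML string is made up for the example.
from bs4 import BeautifulSoup

html = """
<main id="content">
  <p>Mercedes Sosa was an Argentine singer.</p>
  <div class="navbox">navigation links</div>
  <div class="catlinks">category links</div>
</main>
"""
soup = BeautifulSoup(html, "html.parser")

for selector in [{"class": "navbox"}, {"class": "catlinks"}]:
    for element in soup.find_all(attrs=selector):
        element.decompose()  # remove the tag and its contents from the tree

print(soup.find("main", {"id": "content"}).get_text(strip=True))
# -> Mercedes Sosa was an Argentine singer.
```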
@@ -79,3 +121,41 @@ def get_youtube_transcript(video_id: str) -> str:
     # raw data is in the form of [{ 'text': 'Hey there', 'start': 0.0, 'duration': 1.54 }, { 'text': 'how are you',, 'start': 1.54, 'duration': 4.16 }, ... ] we will return ony the text element as lines
     transcript = "\n".join([item['text'] for item in raw_data])
     return transcript
+
+
+@tool
+def transcribe_audio(audio_path: str) -> str:
+    """
+    Speech to Text - transcribes audio file and returns the text
+
+    Args:
+        audio_path (str): Local file path to the audio
+
+    Returns:
+        str: The transcript of the audio file
+    """
+
+    device = "cuda:0" if torch.cuda.is_available() else "cpu"
+    torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
+
+    model_id = "openai/whisper-small"
+
+    model = AutoModelForSpeechSeq2Seq.from_pretrained(
+        model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True
+    )
+    model.to(device)
+
+    processor = AutoProcessor.from_pretrained(model_id)
+
+    pipe = pipeline(
+        "automatic-speech-recognition",
+        model=model,
+        tokenizer=processor.tokenizer,
+        feature_extractor=processor.feature_extractor,
+        torch_dtype=torch_dtype,
+        device=device,
+        chunk_length_s=30,
+    )
+
+    result = pipe(audio_path)
+    return result
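
A usage sketch for the new transcribe_audio tool; "question.mp3" is a placeholder path, and the direct call assumes the smolagents @tool wrapper stays callable. Worth noting: the Hugging Face automatic-speech-recognition pipeline returns a dict, so the transcript string normally sits under the "text" key of the result rather than being a plain string.

```python
# Usage sketch; "question.mp3" is a placeholder path.
# The ASR pipeline returns a dict such as {"text": "..."}.
from tool import transcribe_audio

result = transcribe_audio("question.mp3")
text = result["text"] if isinstance(result, dict) else result
print(text)
```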