ffreemt committed
Commit 3305d00 · 1 Parent(s): fa1b525
Update

Files changed:
- .gitignore +2 -0
- __pycache__/openai_model.cpython-312.pyc +0 -0
- app.py +8 -9
- basic_agent.py +10 -4
- openai_model.py +15 -4
.gitignore
CHANGED
@@ -1 +1,3 @@
 .env-gemini
+exclude-from
+.env
__pycache__/openai_model.cpython-312.pyc
CHANGED
Binary files a/__pycache__/openai_model.cpython-312.pyc and b/__pycache__/openai_model.cpython-312.pyc differ
app.py
CHANGED
@@ -7,6 +7,7 @@ import pandas as pd
 import requests
 import rich
 import wikipediaapi
+from loguru import logger
 from mcp import StdioServerParameters
 from smolagents import DuckDuckGoSearchTool, FinalAnswerTool, Tool, ToolCollection, VisitWebpageTool
 from ycecream import y
@@ -88,14 +89,11 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
 
     mcp_searxng_params = StdioServerParameters(
         **{
-
-
-            "
-
-
-            "env": {
-                "SEARXNG_URL": os.getenv("SEARXNG_URL", "https://searx.be")  # https://searx.space or run and set your own
-            }
+            "command": "npx",
+            "args": ["-y", "mcp-searxng"],
+            "env": {
+                "SEARXNG_URL": os.getenv("SEARXNG_URL", "https://searx.be")  # https://searx.space or run and set your own
+            },
         }
     )
 
@@ -114,7 +112,7 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
                 WikipediaSearchTool(),
                 FinalAnswerTool(),
             ],
-            verbosity_level=1,
+            # verbosity_level=1,
         )
         agent.agent.visualize()
     except Exception as e:
@@ -136,6 +134,7 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
             continue
         try:
             submitted_answer = agent(question_text)
+            logger.debug(f">>> {submitted_answer=}, {question_text=}")
             answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
             results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
         except Exception as e:
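The app.py hunks point StdioServerParameters at the mcp-searxng MCP server, launched via npx, and log each submitted answer with loguru. As a rough sketch of how smolagents typically consumes such server parameters (the actual wiring in app.py sits outside these hunks, so the ToolCollection usage below is an assumption, not this repo's code):

# Sketch (assumption): spawn the mcp-searxng server over stdio and expose its
# tools to smolagents; app.py's real wiring is not shown in this diff.
import os

from mcp import StdioServerParameters
from smolagents import ToolCollection

mcp_searxng_params = StdioServerParameters(
    command="npx",
    args=["-y", "mcp-searxng"],
    env={"SEARXNG_URL": os.getenv("SEARXNG_URL", "https://searx.be")},
)

# ToolCollection.from_mcp launches the MCP server and adapts its tools into
# smolagents Tool objects for the lifetime of the context manager.
with ToolCollection.from_mcp(mcp_searxng_params, trust_remote_code=True) as tool_collection:
    print([tool.name for tool in tool_collection.tools])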
basic_agent.py
CHANGED
@@ -19,7 +19,7 @@ from get_model import get_model
 from litellm_model import litellm_model
 from openai_model import openai_model
 
-
+console = rich.get_console()
 DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
 SPACE_ID = os.getenv("SPACE_ID", "mikeee/final-assignment")
 
@@ -49,6 +49,9 @@ AUTHORIZED_IMPORTS = [
     "io",
     "glob",
     "chess",
+    "speech_recognition",
+    "input",
+    "pandas.compat",
 ]
 
 
@@ -202,7 +205,7 @@ def main():
                 WikipediaSearchTool(),
                 FinalAnswerTool(),
             ],
-            verbosity_level=1,
+            # verbosity_level=1,
         )
         agent.agent.visualize()
     except Exception as e:
@@ -216,10 +219,10 @@ def main():
 
     print(f"Running agent on {len(questions_data)} questions...")
 
-
+    for idx, item in enumerate(questions_data):
     # for item in questions_data[-1:]:
     # for item in questions_data[14:15]:
-    for item in questions_data[-6:]:
+    # for item in questions_data[-6:]:
         task_id = item.get("task_id")
         question_text = item.get("question")
         if not task_id or question_text is None:
@@ -227,6 +230,9 @@ def main():
             continue
         try:
             submitted_answer = agent(question_text)
+            print(f"{idx=} {"*" * 20}")
+            print([submitted_answer, question_text])
+
             answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
             results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
         except Exception as e:
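basic_agent.py now whitelists speech_recognition, input, and pandas.compat in AUTHORIZED_IMPORTS and loops over every question with enumerate instead of a slice. A minimal sketch of how such a whitelist is usually handed to a smolagents CodeAgent (the model and tool choices below are placeholders, not the file's actual ones):

# Sketch (assumption): passing an AUTHORIZED_IMPORTS-style whitelist to a
# smolagents CodeAgent; model and tools here are illustrative placeholders.
from smolagents import CodeAgent, DuckDuckGoSearchTool, FinalAnswerTool

AUTHORIZED_IMPORTS = ["io", "glob", "chess", "speech_recognition", "pandas.compat"]

def build_agent(model):
    # additional_authorized_imports extends the set of modules the generated
    # Python code is allowed to import inside the agent's code executor.
    return CodeAgent(
        tools=[DuckDuckGoSearchTool(), FinalAnswerTool()],
        model=model,
        additional_authorized_imports=AUTHORIZED_IMPORTS,
    )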
openai_model.py
CHANGED
@@ -2,6 +2,7 @@
 
 import os
 import sys
+from platform import node
 
 import rich
 from loguru import logger
@@ -27,15 +28,25 @@ def openai_model(
     # default llama4
     api_base = api_base or "https://api.llama.com/compat/v1"
 
+    if "golay" in node() and ("llama.com" in api_base or "openai.com" in api_base):
+        os.environ.update(
+            HTTPS_PROXY="http://localhost:8081",
+            HTTP_PROXY="http://localhost:8081",
+            ALL_PROXY="http://localhost:8081",
+            NO_PROXY="localhost,127.0.0.1",
+        )
+
     api_key = api_key or os.getenv("LLAMA_API_KEY")
     if isinstance(api_key, str):
         # LLAMA_API_KEY contains | and in win10 need to assign env var with ""
         api_key = api_key.strip('"')
     assert api_key, "LLAMA_API_KEY not set, set it and try again"
 
-
-    # "Llama-4-Scout-17B-16E-Instruct-FP8"
-
+    default = "Llama-4-Maverick-17B-128E-Instruct-FP8"
+    # default = "Llama-4-Scout-17B-16E-Instruct-FP8"
+    logger.debug(f"{default=}")
+
+    model_id = model_id or default
 
     return OpenAIServerModel(
         model_id,
@@ -50,9 +61,9 @@ def main():
     logger.debug(sys.argv)
 
     if not sys.argv[1:]:
-        logger.debug("default llama4 scout")
         model = openai_model()
         logger.debug(model(messages))
+        return
 
     if len(sys.argv[1:]) < 3:
         raise SystemExit("Provide at least three args (model_id, api_base, api_key)")
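openai_model.py now routes traffic through a local proxy only when the host name contains "golay", falls back to Llama-4-Maverick-17B-128E-Instruct-FP8 when no model_id is given, and makes main() return after the no-argument smoke test. A rough usage sketch under those assumptions (the messages object used in main() is not visible in these hunks, so the payload below is illustrative):

# Sketch (assumption): calling the factory from this commit; requires
# LLAMA_API_KEY in the environment, per the assert in openai_model().
from openai_model import openai_model

model = openai_model()  # default: Llama-4-Maverick-17B-128E-Instruct-FP8 via https://api.llama.com/compat/v1
messages = [
    {"role": "user", "content": [{"type": "text", "text": "Reply with a single word."}]},
]
print(model(messages))  # the smolagents OpenAIServerModel returns a chat message object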