Update app.py
app.py
CHANGED
@@ -4,37 +4,52 @@ import requests
 import pandas as pd
 from smolagents import Agent

+# Config
 DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
+MODEL_NAME = "google/flan-t5-small"
+SPACE_ID = os.getenv("SPACE_ID", "sirine1712/Final_Assignment_Template")
+HF_TOKEN = os.getenv("HF_TOKEN")

-#
+# Define a simple Hugging Face Inference Agent
 class HuggingFaceAPIAgent(Agent):
-    def __init__(self, model=
+    def __init__(self, model=MODEL_NAME):
         self.model = model
         self.api_url = f"https://api-inference.huggingface.co/models/{model}"
-        self.headers = {"Authorization": f"Bearer {
+        self.headers = {"Authorization": f"Bearer {HF_TOKEN}"}

     def __call__(self, question: str) -> str:
-        print(f"
-
-
-
-
-
-
-
-
-
-
+        print(f"⏳ Sending question to HF model: {question[:60]}")
+        try:
+            response = requests.post(
+                self.api_url,
+                headers=self.headers,
+                json={"inputs": question},
+                timeout=10
+            )
+            response.raise_for_status()
+            output = response.json()
+            if isinstance(output, list):
+                return output[0].get("generated_text", "No answer generated.")
+            else:
+                return output.get("generated_text", "No answer generated.")
+        except Exception as e:
+            print(f"⚠️ Error calling model: {e}")
+            return f"Error: {e}"

+# Function to run agent and submit to GAIA scoring API
 def run_and_submit_all(profile: gr.OAuthProfile | None):
     if not profile:
-        return "Please log in first.", None
+        return "❌ Please log in first.", None

-    username = profile.username
-    agent_code = f"https://huggingface.co/spaces/{
+    username = profile.username or "anonymous"
+    agent_code = f"https://huggingface.co/spaces/{SPACE_ID}/tree/main"
     agent = HuggingFaceAPIAgent()

-
+    try:
+        questions = requests.get(f"{DEFAULT_API_URL}/questions", timeout=15).json()
+    except Exception as e:
+        return f"❌ Failed to fetch questions: {e}", None
+
     answers, log = [], []

     for q in questions:
@@ -43,28 +58,40 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
         except Exception as e:
             answer = f"Error: {e}"
         answers.append({"task_id": q["task_id"], "submitted_answer": answer})
-        log.append({
+        log.append({
+            "Task ID": q["task_id"],
+            "Question": q["question"],
+            "Submitted Answer": answer
+        })

-
-
-
-
-
+    try:
+        result = requests.post(
+            f"{DEFAULT_API_URL}/submit",
+            json={
+                "username": username,
+                "agent_code": agent_code,
+                "answers": answers
+            },
+            timeout=10
+        ).json()
+    except Exception as e:
+        return f"❌ Submission failed: {e}", pd.DataFrame(log)

     message = (
-        f"✅ Submission complete
-        f"Score
-        f"{result.get('correct_count')}/{result.get('total_attempted')}
-        f"Message
+        f"✅ **Submission complete!**\n"
+        f"**Score:** {result.get('score')}%\n"
+        f"**Correct:** {result.get('correct_count')}/{result.get('total_attempted')}\n"
+        f"**Message:** {result.get('message')}"
     )
     return message, pd.DataFrame(log)

+# Gradio UI
 with gr.Blocks() as demo:
-    gr.Markdown("# 🤖
+    gr.Markdown("# 🤖 HuggingFace Inference Agent\nA minimal agent using FLAN-T5 on HuggingFace Inference API.")
     gr.LoginButton()
-    btn = gr.Button("Run Agent & Submit")
-    status = gr.Textbox(label="Status")
-    results = gr.DataFrame(label="
+    btn = gr.Button("🚀 Run Agent & Submit")
+    status = gr.Textbox(label="Status", lines=4)
+    results = gr.DataFrame(label="Agent Output Log")
     btn.click(fn=run_and_submit_all, outputs=[status, results])

-demo.launch()
+demo.launch()
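Side note: a minimal local sketch for smoke-testing the same Inference API call the agent makes, outside the Space. It assumes HF_TOKEN is exported in the environment and that google/flan-t5-small is available on the hosted Inference API; the ask() helper and the sample question are illustrative only and are not part of app.py.

# Standalone sketch mirroring HuggingFaceAPIAgent.__call__ for local debugging.
# Assumes HF_TOKEN is set; model name and question are example values.
import os
import requests

MODEL_NAME = "google/flan-t5-small"
API_URL = f"https://api-inference.huggingface.co/models/{MODEL_NAME}"
HEADERS = {"Authorization": f"Bearer {os.getenv('HF_TOKEN')}"}

def ask(question: str) -> str:
    # Same request shape as the agent: JSON body with an "inputs" field.
    response = requests.post(API_URL, headers=HEADERS, json={"inputs": question}, timeout=10)
    response.raise_for_status()
    output = response.json()
    # Text2text models typically return a list of {"generated_text": ...} dicts.
    if isinstance(output, list):
        return output[0].get("generated_text", "No answer generated.")
    return output.get("generated_text", "No answer generated.")

if __name__ == "__main__":
    print(ask("What is the capital of France?"))

If the hosted endpoint is cold, the first request may come back as a 503 while the model loads, which surfaces here (and in the agent) as raise_for_status() raising and being reported as an error string.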