import os
import asyncio

import gradio as gr
import pandas as pd
import requests
from smolagents import CodeAgent, DuckDuckGoSearchTool, InferenceClientModel
from huggingface_hub import login

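# Scoring API for the Agents course Unit 4 evaluation (provides the questions and accepts submissions).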
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"

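# Authenticate with the Hugging Face Hub so the inference client can call the hosted model; the token is read from the environment.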
login(token=os.environ["HUGGINGFACEHUB_API_TOKEN"])

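# Web search tool the agent can call during its reasoning steps.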
search_tool = DuckDuckGoSearchTool()

async def run_and_submit_all(profile: gr.OAuthProfile | None):
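    """Build the agent, run it on every evaluation question, and submit the answers."""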
    try:
        agent = CodeAgent(
            tools=[search_tool],
            model=InferenceClientModel(model="mistralai/Magistral-Small-2506"),
            max_steps=5,
            verbosity_level=2        
        )
    except Exception as e:
        return f"Error initializing agent: {e}", None

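    # Link to this Space's code, sent along with the answers for verification.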
    space_id = os.getenv("SPACE_ID")
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"

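    # 1. Fetch the full list of evaluation questions from the scoring server.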
    questions_url = f"{DEFAULT_API_URL}/questions"
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
        if not questions_data:
            return "Fetched questions list is empty or invalid format.", None
    except Exception as e:
        return f"Error fetching questions: {e}", None

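    # 2. Run the agent on every question, collecting a payload for submission and a log for display.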
    results_log = []
    answers_payload = []
    loop = asyncio.get_running_loop()

    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
        if not task_id or question_text is None:
            continue

        try:
            system_prompt = (
                "You are a general AI assistant. I will ask you a question. "
                "Report your thoughts, and finish your answer with the following template: "
                "FINAL ANSWER: [YOUR FINAL ANSWER]. YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. "
                "If you are asked for a number, don't write it with commas and don't include units such as $ or percent signs unless specified otherwise. "
                "If you are asked for a string, don't use articles or abbreviations (e.g. for cities), and write digits in plain text unless specified otherwise. "
                "If you are asked for a comma separated list, apply the above rules depending on whether each element is a number or a string.\n\n"
            )
            full_prompt = system_prompt + f"Question: {question_text.strip()}"

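            # Offload the blocking agent call to a worker thread so the event loop stays responsive.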
            agent_result = await loop.run_in_executor(None, agent.run, full_prompt)

            # Try to extract final answer depending on type of result
            if isinstance(agent_result, dict) and "final_answer" in agent_result:
                final_answer = str(agent_result["final_answer"]).strip()
            elif isinstance(agent_result, str) and "FINAL ANSWER:" in agent_result:
                _, final_answer = agent_result.rsplit("FINAL ANSWER:", 1)
                final_answer = final_answer.strip()
            else:
                final_answer = str(agent_result).strip()

            answers_payload.append({
                "task_id": task_id,
                "model_answer": final_answer
            })

            results_log.append({
                "Task ID": task_id,
                "Question": question_text,
                "Submitted Answer": final_answer
            })

        except Exception as e:
            print(f"Error running agent on task {task_id}: {e}")
            results_log.append({
                "Task ID": task_id,
                "Question": question_text,
                "Submitted Answer": f"AGENT ERROR: {e}"
            })

    if not answers_payload:
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

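    # 3. Submit all collected answers to the scoring endpoint and report the score.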
    username = profile.username if profile else "unknown"
    submit_url = f"{DEFAULT_API_URL}/submit"
    submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        results_df = pd.DataFrame(results_log)
        return final_status, results_df
    except Exception as e:
        status_message = f"Submission Failed: {e}"
        results_df = pd.DataFrame(results_log)
        return status_message, results_df

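# Gradio interface: login button, run trigger, and output widgets.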
with gr.Blocks() as demo:
    gr.Markdown("# Basic Agent Evaluation Runner")
    gr.Markdown("""
    **Instructions:**
    1. Clone this space and define your agent logic.
    2. Log in to your Hugging Face account.
    3. Click 'Run Evaluation & Submit All Answers'.
    ---
    **Note:**
    The run can take several minutes; agent calls are offloaded to a worker thread so the interface stays responsive.
    """)

    gr.LoginButton()

    run_button = gr.Button("Run Evaluation & Submit All Answers")
    status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)

    run_button.click(fn=run_and_submit_all, outputs=[status_output, results_table])

if __name__ == "__main__":
    print("\n" + "-"*30 + " App Starting " + "-"*30)
    space_host_startup = os.getenv("SPACE_HOST")
    space_id_startup = os.getenv("SPACE_ID")

    if space_host_startup:
        print(f"✅ SPACE_HOST: https://{space_host_startup}.hf.space")
    if space_id_startup:
        print(f"✅ SPACE_ID: https://huggingface.co/spaces/{space_id_startup}")

    print("Launching Gradio Interface...")
    demo.launch(debug=True, share=False)