Update app.py
app.py CHANGED
@@ -8,31 +8,34 @@ import gradio as gr
 from huggingface_hub import HfApi, snapshot_download, ModelInfo, list_models
 from enum import Enum
 
-
 OWNER = "AIEnergyScore"
 COMPUTE_SPACE = f"{OWNER}/launch-computation-example"
-
-
 TOKEN = os.environ.get("DEBUG")
 API = HfApi(token=TOKEN)
 
+task_mappings = {
+    'automatic speech recognition': 'automatic-speech-recognition',
+    'Object Detection': 'object-detection',
+    'Text Classification': 'text-classification',
+    'Image to Text': 'image-to-text',
+    'Question Answering': 'question-answering',
+    'Text Generation': 'text-generation',
+    'Image Classification': 'image-classification',
+    'Sentence Similarity': 'sentence-similarity',
+    'Image Generation': 'image-generation',
+    'Summarization': 'summarization'
+}
 
-
-task_mappings = {'automatic speech recognition':'automatic-speech-recognition', 'Object Detection': 'object-detection', 'Text Classification': 'text-classification',
-                 'Image to Text':'image-to-text', 'Question Answering':'question-answering', 'Text Generation': 'text-generation',
-                 'Image Classification':'image-classification', 'Sentence Similarity': 'sentence-similarity',
-                 'Image Generation':'image-generation', 'Summarization':'summarization'}
 @dataclass
 class ModelDetails:
     name: str
     display_name: str = ""
-    symbol: str = ""
+    symbol: str = "" # emoji
 
 def start_compute_space():
     API.restart_space(COMPUTE_SPACE)
     gr.Info(f"Okay! {COMPUTE_SPACE} should be running now!")
 
-
 def get_model_size(model_info: ModelInfo):
     """Gets the model size from the configuration, or the model name if the configuration does not contain the information."""
     try:
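Reviewer note: the reformatted `task_mappings` dict maps the dropdown labels shown in the UI to the Hub pipeline tags that get written into the request file. A minimal sketch of how a label resolves (not part of the commit; the helper name is illustrative and assumes the dict above):

```python
def to_pipeline_tag(label: str) -> str:
    """Resolve a dropdown label (e.g. 'Text Generation') to a Hub pipeline tag."""
    try:
        return task_mappings[label]          # -> 'text-generation'
    except KeyError:
        raise ValueError(f"Unsupported task label: {label!r}")
```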
@@ -48,16 +51,15 @@ def add_docker_eval(zip_file):
         API.upload_file(
             path_or_fileobj=zip_file,
             repo_id="AIEnergyScore/tested_proprietary_models",
-            path_in_repo='submitted_models/'+new_fid,
+            path_in_repo='submitted_models/' + new_fid,
             repo_type="dataset",
             commit_message="Adding logs via submission Space.",
             token=TOKEN
-
+        )
         gr.Info('Uploaded logs to dataset! We will validate their validity and add them to the next version of the leaderboard.')
     else:
         gr.Info('You can only upload .zip files here!')
 
-
 def add_new_eval(repo_id: str, task: str):
     model_owner = repo_id.split("/")[0]
     model_name = repo_id.split("/")[1]
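Reviewer note: the added `)` closes the previously unterminated `API.upload_file(...)` call, which is the main fix in this hunk. For reference, a standalone sketch of the same call, assuming a write token and an illustrative file name (the real Space derives `new_fid` from the uploaded file):

```python
from huggingface_hub import HfApi

api = HfApi(token="hf_xxx")  # assumes a token with write access to the dataset repo
api.upload_file(
    path_or_fileobj="energy_logs.zip",                    # illustrative local file
    path_in_repo="submitted_models/energy_logs.zip",
    repo_id="AIEnergyScore/tested_proprietary_models",
    repo_type="dataset",
    commit_message="Adding logs via submission Space.",
)
```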
@@ -91,10 +93,8 @@ def add_new_eval(repo_id: str, task: str):
         "task": task_mappings[task],
         "likes": likes,
         "params": model_size,
-        "leaderboard_version": "v0",
-
-        #"private": False,
-        #}
+        "leaderboard_version": "v0",
+    }
 
     print("Writing out request file to dataset")
     df_request_dict = pd.DataFrame([request_dict])
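Reviewer note: the old code left `request_dict` unterminated (its closing brace was commented out as `#}`); the new `}` fixes that. A sketch of the resulting payload shape, with illustrative values for the fields not visible in this hunk:

```python
import pandas as pd

request_dict = {
    "model": "org/model-name",        # illustrative; built from the submitted repo id
    "task": "text-generation",        # task_mappings[task]
    "likes": 42,                      # illustrative
    "params": 7_000_000_000,          # illustrative
    "leaderboard_version": "v0",
}
df_request_dict = pd.DataFrame([request_dict])   # one-row frame, as in the hunk above
print(df_request_dict.to_json(orient="records"))
```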
@@ -106,11 +106,10 @@ def add_new_eval(repo_id: str, task: str):
     gr.Info("Starting compute space at %s " % COMPUTE_SPACE)
     return start_compute_space()
 
-
 def print_existing_models():
-    requests= load_dataset("AIEnergyScore/requests_debug", split="test", token=TOKEN)
+    requests = load_dataset("AIEnergyScore/requests_debug", split="test", token=TOKEN)
     requests_dset = requests.to_pandas()
-    model_df= requests_dset[['model', 'status']]
+    model_df = requests_dset[['model', 'status']]
     model_df = model_df[model_df['status'] == 'COMPLETED']
     return model_df
 
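Reviewer note: `print_existing_models()` pulls the requests dataset and keeps only rows with status `COMPLETED`. A small sketch (the helper name is hypothetical, assuming the `model` column used above) of how that could be used to skip resubmissions:

```python
def already_benchmarked(repo_id: str) -> bool:
    """Return True if repo_id already has a COMPLETED run in the requests dataset."""
    completed = print_existing_models()
    return repo_id in completed["model"].values
```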
@@ -132,15 +131,62 @@ def get_leaderboard_models():
     for filename in filenames:
         data.append(pd.read_csv(filename))
     leaderboard_data = pd.concat(data, ignore_index=True)
-    return leaderboard_data[['model','task']]
+    return leaderboard_data[['model', 'task']]
 
+# A placeholder for get_zip_data_link() -- replace with your actual implementation if available.
+def get_zip_data_link():
+    return (
+        '<a href="https://example.com/download.zip" '
+        'style="margin: 0 10px; text-decoration: none; font-weight: bold; font-size: 1.1em; '
+        'color: black; font-family: \'Inter\', sans-serif;">Download Logs</a>'
+    )
 
 with gr.Blocks() as demo:
-
-    gr.
+    # --- Header Links (at the very top) ---
+    with gr.Row():
+        leaderboard_link = gr.HTML(
+            '<a href="https://huggingface.co/spaces/AIEnergyScore/Leaderboard" '
+            'style="margin: 0 10px; text-decoration: none; font-weight: bold; font-size: 1.1em; '
+            'color: black; font-family: \'Inter\', sans-serif;">Leaderboard</a>'
+        )
+        label_link = gr.HTML(
+            '<a href="https://huggingface.co/spaces/AIEnergyScore/Label" '
+            'style="margin: 0 10px; text-decoration: none; font-weight: bold; font-size: 1.1em; '
+            'color: black; font-family: \'Inter\', sans-serif;">Label Generator</a>'
+        )
+        faq_link = gr.HTML(
+            '<a href="https://huggingface.github.io/AIEnergyScore/#faq" '
+            'style="margin: 0 10px; text-decoration: none; font-weight: bold; font-size: 1.1em; '
+            'color: black; font-family: \'Inter\', sans-serif;">FAQ</a>'
+        )
+        documentation_link = gr.HTML(
+            '<a href="https://huggingface.github.io/AIEnergyScore/#documentation" '
+            'style="margin: 0 10px; text-decoration: none; font-weight: bold; font-size: 1.1em; '
+            'color: black; font-family: \'Inter\', sans-serif;">Documentation</a>'
+        )
+        download_link = gr.HTML(get_zip_data_link())
+        community_link = gr.HTML(
+            '<a href="https://huggingface.co/spaces/AIEnergyScore/README/discussions" '
+            'style="margin: 0 10px; text-decoration: none; font-weight: bold; font-size: 1.1em; '
+            'color: black; font-family: \'Inter\', sans-serif;">Community</a>'
+        )
+
+    # --- Logo (centered) ---
+    gr.HTML('''
+    <div style="margin-top: 0px;">
+        <img src="https://huggingface.co/spaces/AIEnergyScore/Leaderboard/resolve/main/logo.png"
+             alt="Logo"
+             style="display: block; margin: 0 auto; max-width: 500px; height: auto;">
+    </div>
+    ''')
+
+    # --- Main UI ---
+    gr.Markdown("# Submission Portal")
+    gr.Markdown("### The goal of the AI Energy Score project is to develop an energy-based rating system for AI model deployment that will guide members of the community in choosing models for different tasks based on energy efficiency.")
     gr.Markdown("### If you want us to evaluate a model hosted on the 🤗 Hub, enter the model ID and choose the corresponding task from the dropdown list below, then click **Run Analysis** to launch the benchmarking process.")
     gr.Markdown("### If you've used the [Docker file](https://github.com/huggingface/AIEnergyScore/) to run your own evaluation, please submit the resulting log files at the bottom of the page.")
-    gr.Markdown("### The [Project Leaderboard](https://huggingface.co/spaces/AIEnergyScore/Leaderboard) will be updated
+    gr.Markdown("### The [Project Leaderboard](https://huggingface.co/spaces/AIEnergyScore/Leaderboard) will be updated on a biannual basis (last updated in February 2025).")
+
     with gr.Row():
         with gr.Column():
            task = gr.Dropdown(
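Reviewer note: the six header links repeat the same inline style string. If you want to shorten this later, one possible refactor (not part of this commit; names are illustrative) is a small helper:

```python
LINK_STYLE = (
    "margin: 0 10px; text-decoration: none; font-weight: bold; "
    "font-size: 1.1em; color: black; font-family: 'Inter', sans-serif;"
)

def header_link(url: str, text: str) -> str:
    """Build one styled header link for gr.HTML()."""
    return f'<a href="{url}" style="{LINK_STYLE}">{text}</a>'

# e.g. leaderboard_link = gr.HTML(header_link(
#     "https://huggingface.co/spaces/AIEnergyScore/Leaderboard", "Leaderboard"))
```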
@@ -152,19 +198,17 @@ with gr.Blocks() as demo:
            )
        with gr.Column():
            model_name_textbox = gr.Textbox(label="Model name (user_name/model_name)")
-
+
    with gr.Row():
        with gr.Column():
            submit_button = gr.Button("Submit for Analysis")
            submission_result = gr.Markdown()
            submit_button.click(
                fn=add_new_eval,
-                inputs=[
-                    model_name_textbox,
-                    task,
-                ],
+                inputs=[model_name_textbox, task],
                outputs=submission_result,
            )
+
    with gr.Row():
        with gr.Column():
            with gr.Accordion("Submit log files from a Docker run:", open=False):
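Reviewer note: `inputs=[model_name_textbox, task]` passes the textbox value first and the dropdown value second, matching `add_new_eval(repo_id, task)`. Since `add_new_eval` indexes `repo_id.split("/")[1]`, a guard like the sketch below (hypothetical, not in the commit) would turn a malformed id into a readable message instead of an IndexError:

```python
def add_new_eval_checked(repo_id: str, task: str):
    """Wrapper with the same (repo_id, task) signature as add_new_eval."""
    if repo_id.count("/") != 1:
        return "Please enter the model as user_name/model_name."
    return add_new_eval(repo_id, task)

# submit_button.click(fn=add_new_eval_checked, inputs=[model_name_textbox, task],
#                     outputs=submission_result)
```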
@@ -176,19 +220,21 @@ with gr.Blocks() as demo:
                file_output = gr.File(visible=False)
                u = gr.UploadButton("Upload a zip file with logs", file_count="single", interactive=False)
                u.upload(add_docker_eval, u, file_output)
-
+
                def update_upload_button_interactive(checkbox_value):
                    return gr.UploadButton.update(interactive=checkbox_value)
-
+
                agreement_checkbox.change(
                    fn=update_upload_button_interactive,
                    inputs=[agreement_checkbox],
                    outputs=[u]
                )
+
    with gr.Row():
        with gr.Column():
-
-
-
-
+            with gr.Accordion("Models that are in the latest leaderboard version:", open=False, visible=False):
+                gr.Dataframe(get_leaderboard_models())
+            with gr.Accordion("Models that have been benchmarked recently:", open=False, visible=False):
+                gr.Dataframe(formatted_df)
+
 demo.launch()
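Reviewer note: `gr.UploadButton.update(...)` is the Gradio 3.x idiom; the per-component `update()` methods were removed in Gradio 4.x. If the Space is ever pinned to Gradio 4.x, the callback would instead return a component instance, roughly:

```python
def update_upload_button_interactive(checkbox_value):
    # Gradio 4.x style: return a component (or gr.update(...)) rather than UploadButton.update()
    return gr.UploadButton(interactive=checkbox_value)
```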