Update app.py
app.py CHANGED
@@ -136,11 +136,11 @@ def get_leaderboard_models():
 
 
 with gr.Blocks() as demo:
-    gr.Markdown("# Energy Score Submission Portal
+    gr.Markdown("# AI Energy Score | Submission Portal")
     gr.Markdown("### The goal of the AI Energy Score project is to develop an energy-based rating system for AI model deployment that will guide members of the community in choosing models for different tasks based on energy efficiency.", elem_classes="markdown-text")
     gr.Markdown("### If you want us to evaluate a model hosted on the 🤗 Hub, enter the model ID and choose the corresponding task from the dropdown list below, then click **Run Analysis** to launch the benchmarking process.")
-    gr.Markdown("### If you've used the [Docker file](https://github.com/huggingface/
-    gr.Markdown("### The [Project Leaderboard](https://huggingface.co/spaces/
+    gr.Markdown("### If you've used the [Docker file](https://github.com/huggingface/AIEnergyScore/) to run your own evaluation, please submit the resulting log files at the bottom of the page.")
+    gr.Markdown("### The [Project Leaderboard](https://huggingface.co/spaces/AIEnergyScore/Leaderboard) will be updated twice a year.")
     with gr.Row():
         with gr.Column():
             task = gr.Dropdown(
@@ -155,7 +155,7 @@ with gr.Blocks() as demo:
 
     with gr.Row():
         with gr.Column():
-            submit_button = gr.Button("
+            submit_button = gr.Button("Submit for Analysis")
             submission_result = gr.Markdown()
             submit_button.click(
                 fn=add_new_eval,
@@ -165,17 +165,17 @@ with gr.Blocks() as demo:
                 ],
                 outputs=submission_result,
             )
-
-        with gr.Column():
-            with gr.Accordion("Submit log files from a Docker run:", open=False):
-                gr.Markdown("If you've already benchmarked your model using the [Docker file](https://github.com/huggingface/EnergyStarAI/) provided, please upload the **entire run log directory** (in .zip format) below:")
-                file_output = gr.File(visible=False)
-                u = gr.UploadButton("Upload a zip file with logs", file_count="single")
-                u.upload(add_docker_eval, u, file_output)
-    with gr.Row():
-        with gr.Column():
-            with gr.Accordion("Models that are in the latest leaderboard version:", open=False):
-                gr.Dataframe(get_leaderboard_models())
-            with gr.Accordion("Models that have been benchmarked recently:", open=False):
-                gr.Dataframe(formatted_df)
+    # with gr.Row():
+    #     with gr.Column():
+    #         with gr.Accordion("Submit log files from a Docker run:", open=False):
+    #             gr.Markdown("If you've already benchmarked your model using the [Docker file](https://github.com/huggingface/EnergyStarAI/) provided, please upload the **entire run log directory** (in .zip format) below:")
+    #             file_output = gr.File(visible=False)
+    #             u = gr.UploadButton("Upload a zip file with logs", file_count="single")
+    #             u.upload(add_docker_eval, u, file_output)
+    # with gr.Row():
+    #     with gr.Column():
+    #         with gr.Accordion("Models that are in the latest leaderboard version:", open=False):
+    #             gr.Dataframe(get_leaderboard_models())
+    #         with gr.Accordion("Models that have been benchmarked recently:", open=False):
+    #             gr.Dataframe(formatted_df)
 demo.launch()
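For reference, here is a minimal, self-contained sketch of the submission wiring this change leaves in place: a title, a task dropdown, a submit button, and a `click` handler whose return value is rendered into a `gr.Markdown` output. The task list, the `gr.Textbox` model field, and the `add_new_eval` body below are placeholders for illustration only; the real Space defines its own `add_new_eval`, task choices, and inputs.

```python
# Minimal sketch of the Blocks/Button/click wiring used in the diff above.
# NOTE: TASKS and add_new_eval below are placeholders, not the Space's real code.
import gradio as gr

TASKS = ["text-generation", "image-classification"]  # placeholder task list


def add_new_eval(model_id: str, task: str) -> str:
    # Placeholder: the actual function queues an energy benchmark run.
    if not model_id:
        return "Please enter a model ID."
    return f"Queued `{model_id}` for the `{task}` benchmark."


with gr.Blocks() as demo:
    gr.Markdown("# AI Energy Score | Submission Portal")
    with gr.Row():
        with gr.Column():
            task = gr.Dropdown(choices=TASKS, label="Task", value=TASKS[0])
            model_id = gr.Textbox(label="Model ID on the 🤗 Hub")
    with gr.Row():
        with gr.Column():
            submit_button = gr.Button("Submit for Analysis")
            submission_result = gr.Markdown()
            # Same pattern as the diff: the handler's return value is written
            # into the submission_result Markdown component.
            submit_button.click(
                fn=add_new_eval,
                inputs=[model_id, task],
                outputs=submission_result,
            )

demo.launch()
```

The commented-out Docker path in the diff follows the same event pattern: a `gr.UploadButton` whose `.upload(add_docker_eval, u, file_output)` call forwards the uploaded zip of run logs to a handler.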