Update
- README.md +1 -1
- app.py +17 -127
- src/about.py +12 -52
- src/display/formatting.py +2 -2
- src/display/utils.py +0 -16
- src/envs.py +7 -9
- src/leaderboard/read_evals.py +56 -159
- src/populate.py +4 -40
- src/submission/check_validity.py +0 -99
- src/submission/submit.py +0 -119
README.md CHANGED
@@ -1,5 +1,5 @@
 ---
-title:
+title: LLVM APR Benchmark Leaderboard
 emoji: 🥇
 colorFrom: green
 colorTo: indigo
app.py CHANGED
@@ -3,59 +3,47 @@ from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
 import pandas as pd
 from apscheduler.schedulers.background import BackgroundScheduler
 from huggingface_hub import snapshot_download
+from datasets import load_dataset

 from src.about import (
     CITATION_BUTTON_LABEL,
     CITATION_BUTTON_TEXT,
     EVALUATION_QUEUE_TEXT,
     INTRODUCTION_TEXT,
-    LLM_BENCHMARKS_TEXT,
     TITLE,
 )
 from src.display.css_html_js import custom_css
 from src.display.utils import (
     BENCHMARK_COLS,
     COLS,
-    EVAL_COLS,
-    EVAL_TYPES,
     AutoEvalColumn,
-    ModelType,
     fields,
-    WeightType,
-    Precision
 )
 from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
-from src.populate import
-from src.submission.submit import add_new_eval
+from src.populate import get_leaderboard_df


 def restart_space():
     API.restart_space(repo_id=REPO_ID)

+
 ### Space initialisation
 try:
     print(EVAL_REQUESTS_PATH)
     snapshot_download(
-        repo_id=QUEUE_REPO,
+        repo_id=QUEUE_REPO,
+        local_dir=EVAL_REQUESTS_PATH,
+        repo_type="dataset",
+        tqdm_class=None,
+        etag_timeout=30,
+        token=TOKEN,
     )
 except Exception:
     restart_space()
-try:
-    print(EVAL_RESULTS_PATH)
-    snapshot_download(
-        repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
-    )
-except Exception:
-    restart_space()
-
-(
-    finished_eval_queue_df,
-    running_eval_queue_df,
-    pending_eval_queue_df,
-) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
+total_issues = load_dataset("dtcxzyw/llvm-apr-benchmark").num_rows["test"]
+LEADERBOARD_DF = get_leaderboard_df(EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)

 def init_leaderboard(dataframe):
     if dataframe is None or dataframe.empty:
@@ -68,21 +56,10 @@ def init_leaderboard(dataframe):
             cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden],
             label="Select Columns to Display:",
         ),
-        search_columns=[AutoEvalColumn.
+        search_columns=[AutoEvalColumn.method_name.name, AutoEvalColumn.model_name.name],
         hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
         filter_columns=[
-            ColumnFilter(AutoEvalColumn.
-            ColumnFilter(AutoEvalColumn.precision.name, type="checkboxgroup", label="Precision"),
-            ColumnFilter(
-                AutoEvalColumn.params.name,
-                type="slider",
-                min=0.01,
-                max=150,
-                label="Select the number of parameters (B)",
-            ),
-            ColumnFilter(
-                AutoEvalColumn.still_on_hub.name, type="boolean", label="Deleted/incomplete", default=True
-            ),
+            ColumnFilter(AutoEvalColumn.with_hint.name, type="checkboxgroup", label="With hint"),
         ],
         bool_checkboxgroup_label="Hide models",
         interactive=False,
@@ -92,101 +69,14 @@ def init_leaderboard(dataframe):
 demo = gr.Blocks(css=custom_css)
 with demo:
     gr.HTML(TITLE)
-    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
+    gr.Markdown(INTRODUCTION_TEXT + f"\nTotal issues: {total_issues}\n", elem_classes="markdown-text")

     with gr.Tabs(elem_classes="tab-buttons") as tabs:
         with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
             leaderboard = init_leaderboard(LEADERBOARD_DF)

-        with gr.TabItem("
-            gr.Markdown(
-
-        with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
-            with gr.Column():
-                with gr.Row():
-                    gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
-
-                with gr.Column():
-                    with gr.Accordion(
-                        f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            finished_eval_table = gr.components.Dataframe(
-                                value=finished_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-                    with gr.Accordion(
-                        f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            running_eval_table = gr.components.Dataframe(
-                                value=running_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-
-                    with gr.Accordion(
-                        f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            pending_eval_table = gr.components.Dataframe(
-                                value=pending_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-            with gr.Row():
-                gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
-
-            with gr.Row():
-                with gr.Column():
-                    model_name_textbox = gr.Textbox(label="Model name")
-                    revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
-                    model_type = gr.Dropdown(
-                        choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
-                        label="Model type",
-                        multiselect=False,
-                        value=None,
-                        interactive=True,
-                    )
-
-                with gr.Column():
-                    precision = gr.Dropdown(
-                        choices=[i.value.name for i in Precision if i != Precision.Unknown],
-                        label="Precision",
-                        multiselect=False,
-                        value="float16",
-                        interactive=True,
-                    )
-                    weight_type = gr.Dropdown(
-                        choices=[i.value.name for i in WeightType],
-                        label="Weights type",
-                        multiselect=False,
-                        value="Original",
-                        interactive=True,
-                    )
-                    base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
-
-            submit_button = gr.Button("Submit Eval")
-            submission_result = gr.Markdown()
-            submit_button.click(
-                add_new_eval,
-                [
-                    model_name_textbox,
-                    base_model_name_textbox,
-                    revision_name_textbox,
-                    precision,
-                    weight_type,
-                    model_type,
-                ],
-                submission_result,
-            )
+        with gr.TabItem("🚀 Submission", elem_id="llm-benchmark-tab-table", id=3):
+            gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")

     with gr.Row():
         with gr.Accordion("📙 Citation", open=False):
@@ -201,4 +91,4 @@ with demo:
 scheduler = BackgroundScheduler()
 scheduler.add_job(restart_space, "interval", seconds=1800)
 scheduler.start()
-demo.queue(default_concurrency_limit=40).launch()
+demo.queue(default_concurrency_limit=40).launch()
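As a quick sanity check of the new initialisation code, the issue count shown in the intro can be reproduced locally. A minimal sketch (assuming the `datasets` package is installed and the Hugging Face Hub is reachable):

```python
# Sketch: reproduce the issue count used in the leaderboard intro.
from datasets import load_dataset

ds = load_dataset("dtcxzyw/llvm-apr-benchmark")
print(ds.num_rows)          # per-split row counts, e.g. {"test": ...}
print(ds.num_rows["test"])  # the value appended as "Total issues" in app.py
```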
src/about.py CHANGED
@@ -1,5 +1,6 @@
 from dataclasses import dataclass
 from enum import Enum
+from envs import QUEUE_REPO

 @dataclass
 class Task:
@@ -7,66 +8,25 @@ class Task:
     metric: str
     col_name: str

-
-# Select your tasks here
-# ---------------------------------------------------
-class Tasks(Enum):
-    # task_key in the json file, metric_key in the json file, name to display in the leaderboard
-    task0 = Task("anli_r1", "acc", "ANLI")
-    task1 = Task("logiqa", "acc_norm", "LogiQA")
-
-NUM_FEWSHOT = 0 # Change with your few shot
-# ---------------------------------------------------
-
-
 # Your leaderboard name
-TITLE = """<h1 align="center" id="space-title">
+TITLE = """<h1 align="center" id="space-title">LLVM APR Benchmark Leaderboard</h1>"""

 # What does your leaderboard evaluate?
 INTRODUCTION_TEXT = """
-
+Leaderboard for the [LLVM APR Benchmark](https://huggingface.co/datasets/dtcxzyw/llvm-apr-benchmark).
 """

-
-
-## Reproducibility
-To reproduce our results, here is the commands you can run:
-
-"""
-
-EVALUATION_QUEUE_TEXT = """
-## Some good practices before submitting a model
-
-### 1) Make sure you can load your model and tokenizer using AutoClasses:
-```python
-from transformers import AutoConfig, AutoModel, AutoTokenizer
-config = AutoConfig.from_pretrained("your model name", revision=revision)
-model = AutoModel.from_pretrained("your model name", revision=revision)
-tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
-```
-If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.
-
-Note: make sure your model is public!
-Note: if your model needs `use_remote_code=True`, we do not support this option yet but we are working on adding it, stay posted!
-
-### 2) Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index)
-It's a new format for storing weights which is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`!
-
-### 3) Make sure your model has an open license!
-This is a leaderboard for Open LLMs, and we'd love for as many people as possible to know they can use your model 🤗
-
-### 4) Fill up your model card
-When we add extra information about models to the leaderboard, it will be automatically taken from the model card
-
-## In case of model failure
-If your model is displayed in the `FAILED` category, its execution stopped.
-Make sure you have followed the above steps first.
-If everything is done, check you can launch the EleutherAIHarness on your model locally, using the above command without modifications (you can add `--limit` to limit the number of examples per task).
+EVALUATION_QUEUE_TEXT = f"""
+With the provided evaluation environment, you can get a certificate by calling `env.dump()`.
+Please submit your evaluation results generated by scripts/submit.py to {QUEUE_REPO}.
 """

 CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
 CITATION_BUTTON_TEXT = r"""
+@misc{llvm-apr-benchmark,
+  title = {LLVM APR Benchmark: A Large-Scale Automated Program Repair Benchmark of Real-World LLVM Middle-End Bugs},
+  url = {https://github.com/dtcxzyw/llvm-apr-benchmark},
+  author = {Yingwei Zheng},
+  year = {2025},
+}
 """
src/display/formatting.py CHANGED
@@ -1,10 +1,10 @@
-def
+def make_hyperlink(link, model_name):
     return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'


 def make_clickable_model(model_name):
     link = f"https://huggingface.co/{model_name}"
-    return
+    return make_hyperlink(link, model_name)


 def styled_error(error):
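A quick usage sketch of the renamed helper; both calls return an HTML anchor string for the markdown columns of the table (the model name below is a placeholder):

```python
from src.display.formatting import make_clickable_model, make_hyperlink

print(make_hyperlink("https://github.com/dtcxzyw/llvm-apr-benchmark", "llvm-apr-benchmark"))
print(make_clickable_model("org/model-name"))  # links to https://huggingface.co/org/model-name
```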
src/display/utils.py CHANGED
@@ -27,8 +27,6 @@ auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent(
 auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
 #Scores
 auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)])
-for task in Tasks:
-    auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)])
 # Model information
 auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)])
 auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)])
@@ -43,16 +41,6 @@ auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sh
 # We use make dataclass to dynamically fill the scores from Tasks
 AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)

-## For the queue columns in the submission tab
-@dataclass(frozen=True)
-class EvalQueueColumn:  # Queue column
-    model = ColumnContent("model", "markdown", True)
-    revision = ColumnContent("revision", "str", True)
-    private = ColumnContent("private", "bool", True)
-    precision = ColumnContent("precision", "str", True)
-    weight_type = ColumnContent("weight_type", "str", "Original")
-    status = ColumnContent("status", "str", True)
-
 ## All the model information that we might need
 @dataclass
 class ModelDetails:
@@ -103,8 +91,4 @@ class Precision(Enum):
 # Column selection
 COLS = [c.name for c in fields(AutoEvalColumn) if not c.hidden]

-EVAL_COLS = [c.name for c in fields(EvalQueueColumn)]
-EVAL_TYPES = [c.type for c in fields(EvalQueueColumn)]
-
 BENCHMARK_COLS = [t.value.col_name for t in Tasks]
-
src/envs.py CHANGED
@@ -4,22 +4,20 @@ from huggingface_hub import HfApi

 # Info to change for your repository
 # ----------------------------------
-TOKEN = os.environ.get("HF_TOKEN")
+TOKEN = os.environ.get("HF_TOKEN")  # A read/write token for your org

-OWNER =
+OWNER = (
+    "dtcxzyw"  # Change to your org - don't forget to create a results and request dataset, with the correct format!
+)
 # ----------------------------------

-REPO_ID = f"{OWNER}/leaderboard"
-QUEUE_REPO = f"{OWNER}/
-RESULTS_REPO = f"{OWNER}/results"
+REPO_ID = f"{OWNER}/llvm-apr-benchmark-leaderboard"
+QUEUE_REPO = f"{OWNER}/llvm-apr-benchmark-submissions"

 # If you setup a cache later, just change HF_HOME
-CACHE_PATH=os.getenv("HF_HOME", ".")
+CACHE_PATH = os.getenv("HF_HOME", ".")

 # Local caches
 EVAL_REQUESTS_PATH = os.path.join(CACHE_PATH, "eval-queue")
-EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")
-EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
-EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk")

 API = HfApi(token=TOKEN)
src/leaderboard/read_evals.py CHANGED
@@ -1,36 +1,27 @@
 import glob
 import json
-import math
 import os
 from dataclasses import dataclass

-import
-
-from src.display.formatting import make_clickable_model
-from src.display.utils import AutoEvalColumn, ModelType, Tasks, Precision, WeightType
-from src.submission.check_validity import is_model_on_hub
+from src.display.formatting import make_hyperlink
+from src.display.utils import AutoEvalColumn


 @dataclass
 class EvalResult:
-    """Represents one full evaluation. Built from a combination of the result and request file for a given run.
-    likes: int = 0
-    num_params: int = 0
-    date: str = ""  # submission date of request file
-    still_on_hub: bool = False
+    """Represents one full evaluation. Built from a combination of the result and request file for a given run."""
+
+    method_name: str
+    method_url: str
+    model_name: str
+    model_url: str
+    with_hint: bool
+    attempts: int
+    fast_pass_count: int
+    full_pass_count: int
+    full_pass_count_crash: int
+    full_pass_count_hang: int
+    full_pass_count_miscompilation: int

     @classmethod
     def init_from_json_file(self, json_filepath):
@@ -38,159 +29,65 @@ class EvalResult:
         with open(json_filepath) as fp:
             data = json.load(fp)

-        full_model = "/".join(org_and_model)
-
-        still_on_hub, _, model_config = is_model_on_hub(
-            full_model, config.get("model_sha", "main"), trust_remote_code=True, test_tokenizer=False
-        )
-        architecture = "?"
-        if model_config is not None:
-            architectures = getattr(model_config, "architectures", None)
-            if architectures:
-                architecture = ";".join(architectures)
-
-        # Extract results available in this file (some results are split in several files)
-        results = {}
-        for task in Tasks:
-            task = task.value
-
-            # We average all scores of a given metric (not all metrics are present in all files)
-            accs = np.array([v.get(task.metric, None) for k, v in data["results"].items() if task.benchmark == k])
-            if accs.size == 0 or any([acc is None for acc in accs]):
-                continue
-
-            mean_acc = np.mean(accs) * 100.0
-            results[task.benchmark] = mean_acc
+        method_name = data.get("method_name", "")
+        method_url = data.get("method_url", "")
+        model_name = data.get("model_name", "")
+        model_url = data.get("model_url", "")
+        with_hint = data.get("with_hint", False)
+        fixes = data.get("fixes", [])
+        attempts = len(fixes)
+        fast_pass_count = 0
+        full_pass_count = 0
+        full_pass_count_cat = {}
+        for fix in fixes:
+            bug_type = fix.get("bug_type", "")
+            if fix.get("fast_check_pass", False):
+                fast_pass_count += 1
+            if fix.get("full_check_pass", False):
+                full_pass_count += 1
+                full_pass_count_cat[bug_type] = full_pass_count_cat.get(bug_type, 0) + 1

         return self(
+            method_name=method_name,
+            method_url=method_url,
+            model_name=model_name,
+            model_url=model_url,
+            with_hint=with_hint,
+            attempts=attempts,
+            fast_pass_count=fast_pass_count,
+            full_pass_count=full_pass_count,
+            full_pass_count_crash=full_pass_count_cat.get("crash", 0),
+            full_pass_count_hang=full_pass_count_cat.get("hang", 0),
+            full_pass_count_miscompilation=full_pass_count_cat.get("miscompilation", 0),
         )

-    def update_with_request_file(self, requests_path):
-        """Finds the relevant request file for the current model and updates info with it"""
-        request_file = get_request_file_for_model(requests_path, self.full_model, self.precision.value.name)
-
-        try:
-            with open(request_file, "r") as f:
-                request = json.load(f)
-            self.model_type = ModelType.from_str(request.get("model_type", ""))
-            self.weight_type = WeightType[request.get("weight_type", "Original")]
-            self.license = request.get("license", "?")
-            self.likes = request.get("likes", 0)
-            self.num_params = request.get("params", 0)
-            self.date = request.get("submitted_time", "")
-        except Exception:
-            print(f"Could not find request file for {self.org}/{self.model} with precision {self.precision.value.name}")
-
     def to_dict(self):
         """Converts the Eval Result to a dict compatible with our dataframe display"""
-        average = sum([v for v in self.results.values() if v is not None]) / len(Tasks)
         data_dict = {
-            AutoEvalColumn.license.name: self.license,
-            AutoEvalColumn.likes.name: self.likes,
-            AutoEvalColumn.params.name: self.num_params,
-            AutoEvalColumn.still_on_hub.name: self.still_on_hub,
+            AutoEvalColumn.method_name.name: make_hyperlink(self.method_name, self.method_url),
+            AutoEvalColumn.model_name.name: make_hyperlink(self.model_name, self.model_url),
+            AutoEvalColumn.with_hint.name: self.with_hint,
+            AutoEvalColumn.attempts.name: self.attempts,
+            AutoEvalColumn.fast_pass_count.name: self.fast_pass_count,
+            AutoEvalColumn.full_pass_count.name: self.full_pass_count,
+            AutoEvalColumn.full_pass_count_crash.name: self.full_pass_count_crash,
+            AutoEvalColumn.full_pass_count_hang.name: self.full_pass_count_hang,
+            AutoEvalColumn.full_pass_count_miscompilation.name: self.full_pass_count_miscompilation,
         }

-        for task in Tasks:
-            data_dict[task.value.col_name] = self.results[task.value.benchmark]
-
         return data_dict


-def get_request_file_for_model(requests_path, model_name, precision):
-    """Selects the correct request file for a given model. Only keeps runs tagged as FINISHED"""
-    request_files = os.path.join(
-        requests_path,
-        f"{model_name}_eval_request_*.json",
-    )
-    request_files = glob.glob(request_files)
-
-    # Select correct request file (precision)
-    request_file = ""
-    request_files = sorted(request_files, reverse=True)
-    for tmp_request_file in request_files:
-        with open(tmp_request_file, "r") as f:
-            req_content = json.load(f)
-            if (
-                req_content["status"] in ["FINISHED"]
-                and req_content["precision"] == precision.split(".")[-1]
-            ):
-                request_file = tmp_request_file
-    return request_file
-
-
-def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
+def get_raw_eval_results(requests_path: str) -> list[EvalResult]:
     """From the path of the results folder root, extract all needed info for results"""
-
+    results = []

-    for root, _, files in os.walk(
+    for root, _, files in os.walk(requests_path):
         # We should only have json files in model results
         if len(files) == 0 or any([not f.endswith(".json") for f in files]):
             continue

-        # Sort the files by date
-        try:
-            files.sort(key=lambda x: x.removesuffix(".json").removeprefix("results_")[:-7])
-        except dateutil.parser._parser.ParserError:
-            files = [files[-1]]
-
         for file in files:
+            results.append(EvalResult.init_from_json_file(os.path.join(root, file)))

-    eval_results = {}
-    for model_result_filepath in model_result_filepaths:
-        # Creation of result
-        eval_result = EvalResult.init_from_json_file(model_result_filepath)
-        eval_result.update_with_request_file(requests_path)
-
-        # Store results of same eval together
-        eval_name = eval_result.eval_name
-        if eval_name in eval_results.keys():
-            eval_results[eval_name].results.update({k: v for k, v in eval_result.results.items() if v is not None})
-        else:
-            eval_results[eval_name] = eval_result
-
-    results = []
-    for v in eval_results.values():
-        try:
-            v.to_dict()  # we test if the dict version is complete
-            results.append(v)
-        except KeyError:  # not all eval values present
-            continue

     return results
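For reference, a minimal sketch of a submission record that the new `EvalResult.init_from_json_file` would accept. The field names come from the parser above; the concrete values, URLs, and file name are hypothetical:

```python
import json

# Hypothetical submission record; field names mirror what the parser reads.
sample = {
    "method_name": "my-repair-agent",
    "method_url": "https://example.com/my-repair-agent",
    "model_name": "some-llm",
    "model_url": "https://example.com/some-llm",
    "with_hint": False,
    # One entry per attempted fix; only full_check passes are counted per bug type.
    "fixes": [
        {"bug_type": "crash", "fast_check_pass": True, "full_check_pass": True},
        {"bug_type": "miscompilation", "fast_check_pass": True, "full_check_pass": False},
    ],
}

with open("sample_submission.json", "w") as fp:
    json.dump(sample, fp, indent=2)
```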
src/populate.py CHANGED
@@ -3,14 +3,14 @@ import os

 import pandas as pd

-from src.display.formatting import has_no_nan_values
-from src.display.utils import AutoEvalColumn
+from src.display.formatting import has_no_nan_values
+from src.display.utils import AutoEvalColumn
 from src.leaderboard.read_evals import get_raw_eval_results


-def get_leaderboard_df(
+def get_leaderboard_df(requests_path: str, cols: list, benchmark_cols: list) -> pd.DataFrame:
     """Creates a dataframe from all the individual experiment results"""
-    raw_data = get_raw_eval_results(
+    raw_data = get_raw_eval_results(requests_path)
     all_data_json = [v.to_dict() for v in raw_data]

     df = pd.DataFrame.from_records(all_data_json)
@@ -20,39 +20,3 @@ def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchm
     # filter out if any of the benchmarks have not been produced
     df = df[has_no_nan_values(df, benchmark_cols)]
     return df
-
-
-def get_evaluation_queue_df(save_path: str, cols: list) -> list[pd.DataFrame]:
-    """Creates the different dataframes for the evaluation queues requestes"""
-    entries = [entry for entry in os.listdir(save_path) if not entry.startswith(".")]
-    all_evals = []
-
-    for entry in entries:
-        if ".json" in entry:
-            file_path = os.path.join(save_path, entry)
-            with open(file_path) as fp:
-                data = json.load(fp)
-
-            data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
-            data[EvalQueueColumn.revision.name] = data.get("revision", "main")
-
-            all_evals.append(data)
-        elif ".md" not in entry:
-            # this is a folder
-            sub_entries = [e for e in os.listdir(f"{save_path}/{entry}") if os.path.isfile(e) and not e.startswith(".")]
-            for sub_entry in sub_entries:
-                file_path = os.path.join(save_path, entry, sub_entry)
-                with open(file_path) as fp:
-                    data = json.load(fp)
-
-                data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
-                data[EvalQueueColumn.revision.name] = data.get("revision", "main")
-                all_evals.append(data)
-
-    pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]]
-    running_list = [e for e in all_evals if e["status"] == "RUNNING"]
-    finished_list = [e for e in all_evals if e["status"].startswith("FINISHED") or e["status"] == "PENDING_NEW_EVAL"]
-    df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
-    df_running = pd.DataFrame.from_records(running_list, columns=cols)
-    df_finished = pd.DataFrame.from_records(finished_list, columns=cols)
-    return df_finished[cols], df_running[cols], df_pending[cols]
src/submission/check_validity.py DELETED
@@ -1,99 +0,0 @@
-import json
-import os
-import re
-from collections import defaultdict
-from datetime import datetime, timedelta, timezone
-
-import huggingface_hub
-from huggingface_hub import ModelCard
-from huggingface_hub.hf_api import ModelInfo
-from transformers import AutoConfig
-from transformers.models.auto.tokenization_auto import AutoTokenizer
-
-def check_model_card(repo_id: str) -> tuple[bool, str]:
-    """Checks if the model card and license exist and have been filled"""
-    try:
-        card = ModelCard.load(repo_id)
-    except huggingface_hub.utils.EntryNotFoundError:
-        return False, "Please add a model card to your model to explain how you trained/fine-tuned it."
-
-    # Enforce license metadata
-    if card.data.license is None:
-        if not ("license_name" in card.data and "license_link" in card.data):
-            return False, (
-                "License not found. Please add a license to your model card using the `license` metadata or a"
-                " `license_name`/`license_link` pair."
-            )
-
-    # Enforce card content
-    if len(card.text) < 200:
-        return False, "Please add a description to your model card, it is too short."
-
-    return True, ""
-
-def is_model_on_hub(model_name: str, revision: str, token: str = None, trust_remote_code=False, test_tokenizer=False) -> tuple[bool, str]:
-    """Checks if the model model_name is on the hub, and whether it (and its tokenizer) can be loaded with AutoClasses."""
-    try:
-        config = AutoConfig.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
-        if test_tokenizer:
-            try:
-                tk = AutoTokenizer.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
-            except ValueError as e:
-                return (
-                    False,
-                    f"uses a tokenizer which is not in a transformers release: {e}",
-                    None
-                )
-            except Exception as e:
-                return (False, "'s tokenizer cannot be loaded. Is your tokenizer class in a stable transformers release, and correctly configured?", None)
-        return True, None, config
-
-    except ValueError:
-        return (
-            False,
-            "needs to be launched with `trust_remote_code=True`. For safety reason, we do not allow these models to be automatically submitted to the leaderboard.",
-            None
-        )
-
-    except Exception as e:
-        return False, "was not found on hub!", None
-
-
-def get_model_size(model_info: ModelInfo, precision: str):
-    """Gets the model size from the configuration, or the model name if the configuration does not contain the information."""
-    try:
-        model_size = round(model_info.safetensors["total"] / 1e9, 3)
-    except (AttributeError, TypeError):
-        return 0  # Unknown model sizes are indicated as 0, see NUMERIC_INTERVALS in app.py
-
-    size_factor = 8 if (precision == "GPTQ" or "gptq" in model_info.modelId.lower()) else 1
-    model_size = size_factor * model_size
-    return model_size
-
-def get_model_arch(model_info: ModelInfo):
-    """Gets the model architecture from the configuration"""
-    return model_info.config.get("architectures", "Unknown")
-
-def already_submitted_models(requested_models_dir: str) -> set[str]:
-    """Gather a list of already submitted models to avoid duplicates"""
-    depth = 1
-    file_names = []
-    users_to_submission_dates = defaultdict(list)
-
-    for root, _, files in os.walk(requested_models_dir):
-        current_depth = root.count(os.sep) - requested_models_dir.count(os.sep)
-        if current_depth == depth:
-            for file in files:
-                if not file.endswith(".json"):
-                    continue
-                with open(os.path.join(root, file), "r") as f:
-                    info = json.load(f)
-                    file_names.append(f"{info['model']}_{info['revision']}_{info['precision']}")
-
-                    # Select organisation
-                    if info["model"].count("/") == 0 or "submitted_time" not in info:
-                        continue
-                    organisation, _ = info["model"].split("/")
-                    users_to_submission_dates[organisation].append(info["submitted_time"])
-
-    return set(file_names), users_to_submission_dates
src/submission/submit.py DELETED
@@ -1,119 +0,0 @@
-import json
-import os
-from datetime import datetime, timezone
-
-from src.display.formatting import styled_error, styled_message, styled_warning
-from src.envs import API, EVAL_REQUESTS_PATH, TOKEN, QUEUE_REPO
-from src.submission.check_validity import (
-    already_submitted_models,
-    check_model_card,
-    get_model_size,
-    is_model_on_hub,
-)
-
-REQUESTED_MODELS = None
-USERS_TO_SUBMISSION_DATES = None
-
-def add_new_eval(
-    model: str,
-    base_model: str,
-    revision: str,
-    precision: str,
-    weight_type: str,
-    model_type: str,
-):
-    global REQUESTED_MODELS
-    global USERS_TO_SUBMISSION_DATES
-    if not REQUESTED_MODELS:
-        REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH)
-
-    user_name = ""
-    model_path = model
-    if "/" in model:
-        user_name = model.split("/")[0]
-        model_path = model.split("/")[1]
-
-    precision = precision.split(" ")[0]
-    current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
-
-    if model_type is None or model_type == "":
-        return styled_error("Please select a model type.")
-
-    # Does the model actually exist?
-    if revision == "":
-        revision = "main"
-
-    # Is the model on the hub?
-    if weight_type in ["Delta", "Adapter"]:
-        base_model_on_hub, error, _ = is_model_on_hub(model_name=base_model, revision=revision, token=TOKEN, test_tokenizer=True)
-        if not base_model_on_hub:
-            return styled_error(f'Base model "{base_model}" {error}')
-
-    if not weight_type == "Adapter":
-        model_on_hub, error, _ = is_model_on_hub(model_name=model, revision=revision, token=TOKEN, test_tokenizer=True)
-        if not model_on_hub:
-            return styled_error(f'Model "{model}" {error}')
-
-    # Is the model info correctly filled?
-    try:
-        model_info = API.model_info(repo_id=model, revision=revision)
-    except Exception:
-        return styled_error("Could not get your model information. Please fill it up properly.")
-
-    model_size = get_model_size(model_info=model_info, precision=precision)
-
-    # Were the model card and license filled?
-    try:
-        license = model_info.cardData["license"]
-    except Exception:
-        return styled_error("Please select a license for your model")
-
-    modelcard_OK, error_msg = check_model_card(model)
-    if not modelcard_OK:
-        return styled_error(error_msg)
-
-    # Seems good, creating the eval
-    print("Adding new eval")
-
-    eval_entry = {
-        "model": model,
-        "base_model": base_model,
-        "revision": revision,
-        "precision": precision,
-        "weight_type": weight_type,
-        "status": "PENDING",
-        "submitted_time": current_time,
-        "model_type": model_type,
-        "likes": model_info.likes,
-        "params": model_size,
-        "license": license,
-        "private": False,
-    }
-
-    # Check for duplicate submission
-    if f"{model}_{revision}_{precision}" in REQUESTED_MODELS:
-        return styled_warning("This model has been already submitted.")
-
-    print("Creating eval file")
-    OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
-    os.makedirs(OUT_DIR, exist_ok=True)
-    out_path = f"{OUT_DIR}/{model_path}_eval_request_False_{precision}_{weight_type}.json"
-
-    with open(out_path, "w") as f:
-        f.write(json.dumps(eval_entry))
-
-    print("Uploading eval file")
-    API.upload_file(
-        path_or_fileobj=out_path,
-        path_in_repo=out_path.split("eval-queue/")[1],
-        repo_id=QUEUE_REPO,
-        repo_type="dataset",
-        commit_message=f"Add {model} to eval queue",
-    )
-
-    # Remove the local file
-    os.remove(out_path)
-
-    return styled_message(
-        "Your request has been submitted to the evaluation queue!\nPlease wait for up to an hour for the model to show in the PENDING list."
-    )