haiengchuihaian committed
Commit 1eaecb2 · 1 Parent(s): 9027d90

change leaderboard and submit

app.py CHANGED
@@ -27,176 +27,180 @@ from src.display.utils import (
27
  )
28
  from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, TOKEN, QUEUE_REPO, REPO_ID, RESULTS_REPO
29
  from src.populate import get_evaluation_queue_df, get_leaderboard_df
30
- from src.submission.submit import add_new_eval
31
 
32
 
33
  def restart_space():
34
  API.restart_space(repo_id=REPO_ID, token=TOKEN)
35
 
36
- try:
37
- print(EVAL_REQUESTS_PATH)
38
- snapshot_download(
39
- repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30
40
- )
41
- except Exception:
42
- restart_space()
43
- try:
44
- print(EVAL_RESULTS_PATH)
45
- snapshot_download(
46
- repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30
47
- )
48
- except Exception:
49
- restart_space()
50
 
51
 
52
  raw_data, original_df = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
 
 
 
 
53
  leaderboard_df = original_df.copy()
54
 
55
- (
56
- finished_eval_queue_df,
57
- running_eval_queue_df,
58
- pending_eval_queue_df,
59
- ) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
60
 
61
 
62
  # Searching and filtering
63
- def update_table(
64
- hidden_df: pd.DataFrame,
65
- columns: list,
66
- type_query: list,
67
- precision_query: str,
68
- size_query: list,
69
- show_deleted: bool,
70
- query: str,
71
- ):
72
- filtered_df = filter_models(hidden_df, type_query, size_query, precision_query, show_deleted)
73
- filtered_df = filter_queries(query, filtered_df)
74
- df = select_columns(filtered_df, columns)
75
- return df
76
-
77
-
78
- def search_table(df: pd.DataFrame, query: str) -> pd.DataFrame:
79
- return df[(df[AutoEvalColumn.dummy.name].str.contains(query, case=False))]
80
-
81
-
82
- def select_columns(df: pd.DataFrame, columns: list) -> pd.DataFrame:
83
- always_here_cols = [
84
- AutoEvalColumn.model_type_symbol.name,
85
- AutoEvalColumn.model.name,
86
- ]
87
- # We use COLS to maintain sorting
88
- filtered_df = df[
89
- always_here_cols + [c for c in COLS if c in df.columns and c in columns] + [AutoEvalColumn.dummy.name]
90
- ]
91
- return filtered_df
92
-
93
-
94
- def filter_queries(query: str, filtered_df: pd.DataFrame) -> pd.DataFrame:
95
- final_df = []
96
- if query != "":
97
- queries = [q.strip() for q in query.split(";")]
98
- for _q in queries:
99
- _q = _q.strip()
100
- if _q != "":
101
- temp_filtered_df = search_table(filtered_df, _q)
102
- if len(temp_filtered_df) > 0:
103
- final_df.append(temp_filtered_df)
104
- if len(final_df) > 0:
105
- filtered_df = pd.concat(final_df)
106
- filtered_df = filtered_df.drop_duplicates(
107
- subset=[AutoEvalColumn.model.name, AutoEvalColumn.precision.name, AutoEvalColumn.revision.name]
108
- )
109
-
110
- return filtered_df
111
-
112
-
113
- def filter_models(
114
- df: pd.DataFrame, type_query: list, size_query: list, precision_query: list, show_deleted: bool
115
- ) -> pd.DataFrame:
116
- # Show all models
117
- if show_deleted:
118
- filtered_df = df
119
- else: # Show only still on the hub models
120
- filtered_df = df[df[AutoEvalColumn.still_on_hub.name] == True]
121
-
122
- type_emoji = [t[0] for t in type_query]
123
- filtered_df = filtered_df.loc[df[AutoEvalColumn.model_type_symbol.name].isin(type_emoji)]
124
- filtered_df = filtered_df.loc[df[AutoEvalColumn.precision.name].isin(precision_query + ["None"])]
125
-
126
- numeric_interval = pd.IntervalIndex(sorted([NUMERIC_INTERVALS[s] for s in size_query]))
127
- params_column = pd.to_numeric(df[AutoEvalColumn.params.name], errors="coerce")
128
- mask = params_column.apply(lambda x: any(numeric_interval.contains(x)))
129
- filtered_df = filtered_df.loc[mask]
130
-
131
- return filtered_df
132
-
133
-
 
134
  demo = gr.Blocks(css=custom_css)
135
  with demo:
136
  gr.HTML(TITLE)
137
- gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
138
 
139
  with gr.Tabs(elem_classes="tab-buttons") as tabs:
140
  with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
141
- with gr.Row():
142
- with gr.Column():
143
- with gr.Row():
144
- search_bar = gr.Textbox(
145
- placeholder=" 🔍 Search for your model (separate multiple queries with `;`) and press ENTER...",
146
- show_label=False,
147
- elem_id="search-bar",
148
- )
149
- with gr.Row():
150
- shown_columns = gr.CheckboxGroup(
151
- choices=[
152
- c.name
153
- for c in fields(AutoEvalColumn)
154
- if not c.hidden and not c.never_hidden and not c.dummy
155
- ],
156
- value=[
157
- c.name
158
- for c in fields(AutoEvalColumn)
159
- if c.displayed_by_default and not c.hidden and not c.never_hidden
160
- ],
161
- label="Select columns to show",
162
- elem_id="column-select",
163
- interactive=True,
164
- )
165
- with gr.Row():
166
- deleted_models_visibility = gr.Checkbox(
167
- value=False, label="Show gated/private/deleted models", interactive=True
168
- )
169
- with gr.Column(min_width=320):
170
- #with gr.Box(elem_id="box-filter"):
171
- filter_columns_type = gr.CheckboxGroup(
172
- label="Model types",
173
- choices=[t.to_str() for t in ModelType],
174
- value=[t.to_str() for t in ModelType],
175
- interactive=True,
176
- elem_id="filter-columns-type",
177
- )
178
- filter_columns_precision = gr.CheckboxGroup(
179
- label="Precision",
180
- choices=[i.value.name for i in Precision],
181
- value=[i.value.name for i in Precision],
182
- interactive=True,
183
- elem_id="filter-columns-precision",
184
- )
185
- filter_columns_size = gr.CheckboxGroup(
186
- label="Model sizes (in billions of parameters)",
187
- choices=list(NUMERIC_INTERVALS.keys()),
188
- value=list(NUMERIC_INTERVALS.keys()),
189
- interactive=True,
190
- elem_id="filter-columns-size",
191
- )
192
 
193
  leaderboard_table = gr.components.Dataframe(
194
  value=leaderboard_df[
195
- [c.name for c in fields(AutoEvalColumn) if c.never_hidden]
196
- + shown_columns.value
197
  + [AutoEvalColumn.dummy.name]
198
  ],
199
- headers=[c.name for c in fields(AutoEvalColumn) if c.never_hidden] + shown_columns.value,
200
  datatype=TYPES,
201
  elem_id="leaderboard-table",
202
  interactive=False,
@@ -211,34 +215,34 @@ with demo:
211
  datatype=TYPES,
212
  visible=False,
213
  )
214
- search_bar.submit(
215
- update_table,
216
- [
217
- hidden_leaderboard_table_for_search,
218
- shown_columns,
219
- filter_columns_type,
220
- filter_columns_precision,
221
- filter_columns_size,
222
- deleted_models_visibility,
223
- search_bar,
224
- ],
225
- leaderboard_table,
226
- )
227
- for selector in [shown_columns, filter_columns_type, filter_columns_precision, filter_columns_size, deleted_models_visibility]:
228
- selector.change(
229
- update_table,
230
- [
231
- hidden_leaderboard_table_for_search,
232
- shown_columns,
233
- filter_columns_type,
234
- filter_columns_precision,
235
- filter_columns_size,
236
- deleted_models_visibility,
237
- search_bar,
238
- ],
239
- leaderboard_table,
240
- queue=True,
241
- )
242
 
243
  with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
244
  gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
@@ -248,87 +252,88 @@ with demo:
248
  with gr.Row():
249
  gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
250
 
251
- with gr.Column():
252
- with gr.Accordion(
253
- f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
254
- open=False,
255
- ):
256
- with gr.Row():
257
- finished_eval_table = gr.components.Dataframe(
258
- value=finished_eval_queue_df,
259
- headers=EVAL_COLS,
260
- datatype=EVAL_TYPES,
261
- row_count=5,
262
- )
263
- with gr.Accordion(
264
- f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
265
- open=False,
266
- ):
267
- with gr.Row():
268
- running_eval_table = gr.components.Dataframe(
269
- value=running_eval_queue_df,
270
- headers=EVAL_COLS,
271
- datatype=EVAL_TYPES,
272
- row_count=5,
273
- )
274
-
275
- with gr.Accordion(
276
- f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
277
- open=False,
278
- ):
279
- with gr.Row():
280
- pending_eval_table = gr.components.Dataframe(
281
- value=pending_eval_queue_df,
282
- headers=EVAL_COLS,
283
- datatype=EVAL_TYPES,
284
- row_count=5,
285
- )
286
  with gr.Row():
287
- gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
288
 
289
  with gr.Row():
290
- with gr.Column():
291
- model_name_textbox = gr.Textbox(label="Model name")
292
- revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
293
- model_type = gr.Dropdown(
294
- choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
295
- label="Model type",
296
- multiselect=False,
297
- value=None,
298
- interactive=True,
299
- )
300
-
301
- with gr.Column():
302
- precision = gr.Dropdown(
303
- choices=[i.value.name for i in Precision if i != Precision.Unknown],
304
- label="Precision",
305
- multiselect=False,
306
- value="float16",
307
- interactive=True,
308
- )
309
- weight_type = gr.Dropdown(
310
- choices=[i.value.name for i in WeightType],
311
- label="Weights type",
312
- multiselect=False,
313
- value="Original",
314
- interactive=True,
315
- )
316
- base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
317
-
318
- submit_button = gr.Button("Submit Eval")
319
- submission_result = gr.Markdown()
320
- submit_button.click(
321
- add_new_eval,
322
- [
323
- model_name_textbox,
324
- base_model_name_textbox,
325
- revision_name_textbox,
326
- precision,
327
- weight_type,
328
- model_type,
329
- ],
330
- submission_result,
331
- )
 
332
 
333
  with gr.Row():
334
  with gr.Accordion("📙 Citation", open=False):
@@ -341,6 +346,7 @@ with demo:
341
  )
342
 
343
  scheduler = BackgroundScheduler()
344
- scheduler.add_job(restart_space, "interval", seconds=1800)
345
  scheduler.start()
346
- demo.queue(default_concurrency_limit=40).launch()
 
 
27
  )
28
  from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, TOKEN, QUEUE_REPO, REPO_ID, RESULTS_REPO
29
  from src.populate import get_evaluation_queue_df, get_leaderboard_df
30
+ from src.submission.submit import add_new_eval, upload_file
31
 
32
 
33
  def restart_space():
34
  API.restart_space(repo_id=REPO_ID, token=TOKEN)
35
 
36
+ # try:
37
+ # print(EVAL_REQUESTS_PATH)
38
+ # snapshot_download(
39
+ # repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30
40
+ # )
41
+ # except Exception:
42
+ # restart_space()
43
+ # try:
44
+ # print(EVAL_RESULTS_PATH)
45
+ # snapshot_download(
46
+ # repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30
47
+ # )
48
+ # except Exception:
49
+ # restart_space()
50
 
51
 
52
  raw_data, original_df = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
53
+ value=[ c.name for c in fields(AutoEvalColumn)
54
+ if c.displayed_by_default and not c.hidden and not c.never_hidden]
55
+
56
+
57
  leaderboard_df = original_df.copy()
58
 
59
+ # (
60
+ # finished_eval_queue_df,
61
+ # running_eval_queue_df,
62
+ # pending_eval_queue_df,
63
+ # ) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
64
 
65
 
66
  # Searching and filtering
67
+ # def update_table(
68
+ # hidden_df: pd.DataFrame,
69
+ # columns: list,
70
+ # type_query: list,
71
+ # precision_query: str,
72
+ # size_query: list,
73
+ # show_deleted: bool,
74
+ # query: str,
75
+ # ):
76
+ # filtered_df = filter_models(hidden_df, type_query, size_query, precision_query, show_deleted)
77
+ # filtered_df = filter_queries(query, filtered_df)
78
+ # df = select_columns(filtered_df, columns)
79
+ # return df
80
+
81
+
82
+ # def search_table(df: pd.DataFrame, query: str) -> pd.DataFrame:
83
+ # return df[(df[AutoEvalColumn.dummy.name].str.contains(query, case=False))]
84
+
85
+
86
+ # def select_columns(df: pd.DataFrame, columns: list) -> pd.DataFrame:
87
+ # always_here_cols = [
88
+ # AutoEvalColumn.model_type_symbol.name,
89
+ # AutoEvalColumn.model.name,
90
+ # ]
91
+ # # We use COLS to maintain sorting
92
+ # filtered_df = df[
93
+ # always_here_cols + [c for c in COLS if c in df.columns and c in columns] + [AutoEvalColumn.dummy.name]
94
+ # ]
95
+ # return filtered_df
96
+
97
+
98
+ # def filter_queries(query: str, filtered_df: pd.DataFrame) -> pd.DataFrame:
99
+ # final_df = []
100
+ # if query != "":
101
+ # queries = [q.strip() for q in query.split(";")]
102
+ # for _q in queries:
103
+ # _q = _q.strip()
104
+ # if _q != "":
105
+ # temp_filtered_df = search_table(filtered_df, _q)
106
+ # if len(temp_filtered_df) > 0:
107
+ # final_df.append(temp_filtered_df)
108
+ # if len(final_df) > 0:
109
+ # filtered_df = pd.concat(final_df)
110
+ # filtered_df = filtered_df.drop_duplicates(
111
+ # subset=[AutoEvalColumn.model.name, AutoEvalColumn.precision.name, AutoEvalColumn.revision.name]
112
+ # )
113
+
114
+ # return filtered_df
115
+
116
+
117
+ # def filter_models(
118
+ # df: pd.DataFrame, type_query: list, size_query: list, precision_query: list, show_deleted: bool
119
+ # ) -> pd.DataFrame:
120
+ # # Show all models
121
+ # if show_deleted:
122
+ # filtered_df = df
123
+ # else: # Show only still on the hub models
124
+ # filtered_df = df[df[AutoEvalColumn.still_on_hub.name] == True]
125
+
126
+ # type_emoji = [t[0] for t in type_query]
127
+ # filtered_df = filtered_df.loc[df[AutoEvalColumn.model_type_symbol.name].isin(type_emoji)]
128
+ # filtered_df = filtered_df.loc[df[AutoEvalColumn.precision.name].isin(precision_query + ["None"])]
129
+
130
+ # numeric_interval = pd.IntervalIndex(sorted([NUMERIC_INTERVALS[s] for s in size_query]))
131
+ # params_column = pd.to_numeric(df[AutoEvalColumn.params.name], errors="coerce")
132
+ # mask = params_column.apply(lambda x: any(numeric_interval.contains(x)))
133
+ # filtered_df = filtered_df.loc[mask]
134
+
135
+ # return filtered_df
136
+
137
+
138
+ # print([c.name for c in fields(AutoEvalColumn) if c.never_hidden])
139
  demo = gr.Blocks(css=custom_css)
140
  with demo:
141
  gr.HTML(TITLE)
142
+ # gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
143
 
144
  with gr.Tabs(elem_classes="tab-buttons") as tabs:
145
  with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
146
+ # with gr.Row():
147
+ # with gr.Column():
148
+ # with gr.Row():
149
+ # search_bar = gr.Textbox(
150
+ # placeholder=" 🔍 Search for your model (separate multiple queries with `;`) and press ENTER...",
151
+ # show_label=False,
152
+ # elem_id="search-bar",
153
+ # )
154
+ # with gr.Row():
155
+ # shown_columns = gr.CheckboxGroup(
156
+ # choices=[
157
+ # c.name
158
+ # for c in fields(AutoEvalColumn)
159
+ # if not c.hidden and not c.never_hidden and not c.dummy
160
+ # ],
161
+ # value=[
162
+ # c.name
163
+ # for c in fields(AutoEvalColumn)
164
+ # if c.displayed_by_default and not c.hidden and not c.never_hidden
165
+ # ],
166
+ # label="Select columns to show",
167
+ # elem_id="column-select",
168
+ # interactive=True,
169
+ # )
170
+ # with gr.Row():
171
+ # deleted_models_visibility = gr.Checkbox(
172
+ # value=False, label="Show gated/private/deleted models", interactive=True
173
+ # )
174
+ # with gr.Column(min_width=320):
175
+ # #with gr.Box(elem_id="box-filter"):
176
+ # filter_columns_type = gr.CheckboxGroup(
177
+ # label="Model types",
178
+ # choices=[t.to_str() for t in ModelType],
179
+ # value=[t.to_str() for t in ModelType],
180
+ # interactive=True,
181
+ # elem_id="filter-columns-type",
182
+ # )
183
+ # filter_columns_precision = gr.CheckboxGroup(
184
+ # label="Precision",
185
+ # choices=[i.value.name for i in Precision],
186
+ # value=[i.value.name for i in Precision],
187
+ # interactive=True,
188
+ # elem_id="filter-columns-precision",
189
+ # )
190
+ # filter_columns_size = gr.CheckboxGroup(
191
+ # label="Model sizes (in billions of parameters)",
192
+ # choices=list(NUMERIC_INTERVALS.keys()),
193
+ # value=list(NUMERIC_INTERVALS.keys()),
194
+ # interactive=True,
195
+ # elem_id="filter-columns-size",
196
+ # )
197
 
198
  leaderboard_table = gr.components.Dataframe(
199
  value=leaderboard_df[
200
+ [c.name for c in fields(AutoEvalColumn) if c.never_hidden] + value
 
201
  + [AutoEvalColumn.dummy.name]
202
  ],
203
+ headers=[c.name for c in fields(AutoEvalColumn) if c.never_hidden] + value,
204
  datatype=TYPES,
205
  elem_id="leaderboard-table",
206
  interactive=False,
 
215
  datatype=TYPES,
216
  visible=False,
217
  )
218
+ # search_bar.submit(
219
+ # update_table,
220
+ # [
221
+ # hidden_leaderboard_table_for_search,
222
+ # shown_columns,
223
+ # filter_columns_type,
224
+ # filter_columns_precision,
225
+ # filter_columns_size,
226
+ # deleted_models_visibility,
227
+ # search_bar,
228
+ # ],
229
+ # leaderboard_table,
230
+ # )
231
+ # for selector in [shown_columns, filter_columns_type, filter_columns_precision, filter_columns_size, deleted_models_visibility]:
232
+ # selector.change(
233
+ # update_table,
234
+ # [
235
+ # hidden_leaderboard_table_for_search,
236
+ # shown_columns,
237
+ # filter_columns_type,
238
+ # filter_columns_precision,
239
+ # filter_columns_size,
240
+ # deleted_models_visibility,
241
+ # search_bar,
242
+ # ],
243
+ # leaderboard_table,
244
+ # queue=True,
245
+ # )
246
 
247
  with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
248
  gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
 
252
  with gr.Row():
253
  gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
254
 
255
+ # with gr.Column():
256
+ # with gr.Accordion(
257
+ # f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
258
+ # open=False,
259
+ # ):
260
+ # with gr.Row():
261
+ # finished_eval_table = gr.components.Dataframe(
262
+ # value=finished_eval_queue_df,
263
+ # headers=EVAL_COLS,
264
+ # datatype=EVAL_TYPES,
265
+ # row_count=5,
266
+ # )
267
+ # with gr.Accordion(
268
+ # f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
269
+ # open=False,
270
+ # ):
271
+ # with gr.Row():
272
+ # running_eval_table = gr.components.Dataframe(
273
+ # value=running_eval_queue_df,
274
+ # headers=EVAL_COLS,
275
+ # datatype=EVAL_TYPES,
276
+ # row_count=5,
277
+ # )
278
+
279
+ # with gr.Accordion(
280
+ # f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
281
+ # open=False,
282
+ # ):
283
+ # with gr.Row():
284
+ # pending_eval_table = gr.components.Dataframe(
285
+ # value=pending_eval_queue_df,
286
+ # headers=EVAL_COLS,
287
+ # datatype=EVAL_TYPES,
288
+ # row_count=5,
289
+ # )
290
  with gr.Row():
291
+ gr.Markdown("# ✉️✨ Submit your files here!", elem_classes="markdown-text")
292
 
293
  with gr.Row():
294
+ upload = gr.Interface(fn=upload_file,inputs="file" ,outputs=None)
295
+ # with gr.Column():
296
+ # model_name_textbox = gr.Textbox(label="Model name")
297
+ # revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
298
+ # model_type = gr.Dropdown(
299
+ # choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
300
+ # label="Model type",
301
+ # multiselect=False,
302
+ # value=None,
303
+ # interactive=True,
304
+ # )
305
+
306
+ # with gr.Column():
307
+ # precision = gr.Dropdown(
308
+ # choices=[i.value.name for i in Precision if i != Precision.Unknown],
309
+ # label="Precision",
310
+ # multiselect=False,
311
+ # value="float16",
312
+ # interactive=True,
313
+ # )
314
+ # weight_type = gr.Dropdown(
315
+ # choices=[i.value.name for i in WeightType],
316
+ # label="Weights type",
317
+ # multiselect=False,
318
+ # value="Original",
319
+ # interactive=True,
320
+ # )
321
+ # base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
322
+
323
+ # submit_button = gr.Button("Submit Eval")
324
+ # submission_result = gr.Markdown()
325
+ # submit_button.click(
326
+ # add_new_eval,
327
+ # [
328
+ # model_name_textbox,
329
+ # base_model_name_textbox,
330
+ # revision_name_textbox,
331
+ # precision,
332
+ # weight_type,
333
+ # model_type,
334
+ # ],
335
+ # submission_result,
336
+ # )
337
 
338
  with gr.Row():
339
  with gr.Accordion("📙 Citation", open=False):
 
346
  )
347
 
348
  scheduler = BackgroundScheduler()
349
+ scheduler.add_job(restart_space, "interval", seconds=30)
350
  scheduler.start()
351
+
352
+ demo.queue(default_concurrency_limit=40).launch()
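Taken together, the app.py changes above strip the search/filter UI and the model-request form: the leaderboard table is now built once from the module-level `value` column list, and submissions go through a plain file upload wired to `upload_file`. A minimal sketch of the new submit tab, using only the pieces visible in the diff (the handler body here is a stub; the real one lives in src/submission/submit.py), would be:

```python
import gradio as gr

def upload_file(file_obj):
    # Stub for illustration only: the real handler in src/submission/submit.py
    # copies the uploaded results JSON into EVAL_RESULTS_PATH.
    print(f"received: {file_obj}")

with gr.Blocks() as demo:
    with gr.Row():
        gr.Markdown("# ✉️✨ Submit your files here!", elem_classes="markdown-text")
    with gr.Row():
        # As in the commit: nesting a gr.Interface provides a file picker plus its own
        # Submit button; outputs=None means nothing is rendered back to the user.
        gr.Interface(fn=upload_file, inputs="file", outputs=None)

demo.queue(default_concurrency_limit=40).launch()
```

Because `gr.Interface` renders its own Submit button, no separate `gr.Button` is needed for this flow.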
src/display/about.py CHANGED
@@ -16,7 +16,7 @@ class Tasks(Enum):
16
 
17
 
18
  # Your leaderboard name
19
- TITLE = """<h1 align="center" id="space-title">Demo leaderboard</h1>"""
20
 
21
  # What does your leaderboard evaluate?
22
  INTRODUCTION_TEXT = """
 
16
 
17
 
18
  # Your leaderboard name
19
+ TITLE = """<h1 align="center" id="space-title">OPENT2T LEADERBOARD</h1>"""
20
 
21
  # What does your leaderboard evaluate?
22
  INTRODUCTION_TEXT = """
src/display/utils.py CHANGED
@@ -31,15 +31,15 @@ auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average
31
  for task in Tasks:
32
  auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)])
33
  # Model information
34
- auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)])
35
- auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)])
36
- auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, True)])
37
- auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False)])
38
- auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False)])
39
- auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)])
40
- auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
41
- auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
42
- auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])
43
  # Dummy column for the search bar (hidden by the custom CSS)
44
  auto_eval_column_dict.append(["dummy", ColumnContent, ColumnContent("model_name_for_query", "str", False, dummy=True)])
45
 
 
31
  for task in Tasks:
32
  auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)])
33
  # Model information
34
+ # auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)])
35
+ # auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)])
36
+ # auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, True)])
37
+ # auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False)])
38
+ # auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False)])
39
+ # auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)])
40
+ # auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
41
+ # auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
42
+ # auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])
43
  # Dummy column for the search bar (hidden by the custom CSS)
44
  auto_eval_column_dict.append(["dummy", ColumnContent, ColumnContent("model_name_for_query", "str", False, dummy=True)])
45
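For context on why commenting out these appends is enough to drop the columns: in the stock demo-leaderboard template this Space appears to be based on, `auto_eval_column_dict` is converted into a frozen dataclass, so only the entries that remain become `AutoEvalColumn` attributes. A rough, self-contained sketch of that pattern (the `ColumnContent` field names and display labels here are assumptions from the template, not copied from this repo):

```python
from dataclasses import dataclass, make_dataclass

@dataclass
class ColumnContent:
    name: str
    type: str
    displayed_by_default: bool
    hidden: bool = False
    never_hidden: bool = False
    dummy: bool = False

auto_eval_column_dict = [
    # Only the surviving entries: symbol/model columns, task scores, and the dummy search column.
    ["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)],
    ["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)],
    ["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)],
    ["dummy", ColumnContent, ColumnContent("model_name_for_query", "str", False, dummy=True)],
]

# Each [attr_name, type, default] triple becomes a class attribute, so e.g.
# AutoEvalColumn.model.name == "Model"; a commented-out append simply never becomes an attribute.
AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)

print(AutoEvalColumn.model.name)          # "Model"
print(hasattr(AutoEvalColumn, "likes"))   # False once that append is commented out
```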
 
src/leaderboard/read_evals.py CHANGED
@@ -23,12 +23,12 @@ class EvalResult:
23
  precision: Precision = Precision.Unknown
24
  model_type: ModelType = ModelType.Unknown # Pretrained, fine tuned, ...
25
  weight_type: WeightType = WeightType.Original # Original or Adapter
26
- architecture: str = "Unknown"
27
  license: str = "?"
28
  likes: int = 0
29
  num_params: int = 0
30
  date: str = "" # submission date of request file
31
- still_on_hub: bool = False
32
 
33
  @classmethod
34
  def init_from_json_file(self, json_filepath):
@@ -38,7 +38,7 @@ class EvalResult:
38
 
39
  config = data.get("config")
40
 
41
- # Precision
42
  precision = Precision.from_str(config.get("model_dtype"))
43
 
44
  # Get model and org
@@ -55,14 +55,14 @@ class EvalResult:
55
  result_key = f"{org}_{model}_{precision.value.name}"
56
  full_model = "/".join(org_and_model)
57
 
58
- still_on_hub, _, model_config = is_model_on_hub(
59
- full_model, config.get("model_sha", "main"), trust_remote_code=True, test_tokenizer=False
60
- )
61
- architecture = "?"
62
- if model_config is not None:
63
- architectures = getattr(model_config, "architectures", None)
64
- if architectures:
65
- architecture = ";".join(architectures)
66
 
67
  # Extract results available in this file (some results are split in several files)
68
  results = {}
@@ -85,8 +85,8 @@ class EvalResult:
85
  results=results,
86
  precision=precision,
87
  revision= config.get("model_sha", ""),
88
- still_on_hub=still_on_hub,
89
- architecture=architecture
90
  )
91
 
92
  def update_with_request_file(self, requests_path):
@@ -110,19 +110,19 @@ class EvalResult:
110
  average = sum([v for v in self.results.values() if v is not None]) / len(Tasks)
111
  data_dict = {
112
  "eval_name": self.eval_name, # not a column, just a save name,
113
- AutoEvalColumn.precision.name: self.precision.value.name,
114
- AutoEvalColumn.model_type.name: self.model_type.value.name,
115
  AutoEvalColumn.model_type_symbol.name: self.model_type.value.symbol,
116
- AutoEvalColumn.weight_type.name: self.weight_type.value.name,
117
- AutoEvalColumn.architecture.name: self.architecture,
118
  AutoEvalColumn.model.name: make_clickable_model(self.full_model),
119
  AutoEvalColumn.dummy.name: self.full_model,
120
- AutoEvalColumn.revision.name: self.revision,
121
  AutoEvalColumn.average.name: average,
122
- AutoEvalColumn.license.name: self.license,
123
- AutoEvalColumn.likes.name: self.likes,
124
- AutoEvalColumn.params.name: self.num_params,
125
- AutoEvalColumn.still_on_hub.name: self.still_on_hub,
126
  }
127
 
128
  for task in Tasks:
@@ -156,8 +156,9 @@ def get_request_file_for_model(requests_path, model_name, precision):
156
  def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
157
  """From the path of the results folder root, extract all needed info for results"""
158
  model_result_filepaths = []
159
-
160
  for root, _, files in os.walk(results_path):
 
161
  # We should only have json files in model results
162
  if len(files) == 0 or any([not f.endswith(".json") for f in files]):
163
  continue
@@ -185,6 +186,7 @@ def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResu
185
  eval_results[eval_name] = eval_result
186
 
187
  results = []
 
188
  for v in eval_results.values():
189
  try:
190
  v.to_dict() # we test if the dict version is complete
 
23
  precision: Precision = Precision.Unknown
24
  model_type: ModelType = ModelType.Unknown # Pretrained, fine tuned, ...
25
  weight_type: WeightType = WeightType.Original # Original or Adapter
26
+ # architecture: str = "Unknown"
27
  license: str = "?"
28
  likes: int = 0
29
  num_params: int = 0
30
  date: str = "" # submission date of request file
31
+ # still_on_hub: bool = False
32
 
33
  @classmethod
34
  def init_from_json_file(self, json_filepath):
 
38
 
39
  config = data.get("config")
40
 
41
+ # # Precision
42
  precision = Precision.from_str(config.get("model_dtype"))
43
 
44
  # Get model and org
 
55
  result_key = f"{org}_{model}_{precision.value.name}"
56
  full_model = "/".join(org_and_model)
57
 
58
+ # still_on_hub, _, model_config = is_model_on_hub(
59
+ # full_model, config.get("model_sha", "main"), trust_remote_code=True, test_tokenizer=False
60
+ # )
61
+ # architecture = "?"
62
+ # if model_config is not None:
63
+ # architectures = getattr(model_config, "architectures", None)
64
+ # if architectures:
65
+ # architecture = ";".join(architectures)
66
 
67
  # Extract results available in this file (some results are split in several files)
68
  results = {}
 
85
  results=results,
86
  precision=precision,
87
  revision= config.get("model_sha", ""),
88
+ # still_on_hub=still_on_hub,
89
+ # architecture=architecture
90
  )
91
 
92
  def update_with_request_file(self, requests_path):
 
110
  average = sum([v for v in self.results.values() if v is not None]) / len(Tasks)
111
  data_dict = {
112
  "eval_name": self.eval_name, # not a column, just a save name,
113
+ # AutoEvalColumn.precision.name: self.precision.value.name,
114
+ # AutoEvalColumn.model_type.name: self.model_type.value.name,
115
  AutoEvalColumn.model_type_symbol.name: self.model_type.value.symbol,
116
+ # AutoEvalColumn.weight_type.name: self.weight_type.value.name,
117
+ # AutoEvalColumn.architecture.name: self.architecture,
118
  AutoEvalColumn.model.name: make_clickable_model(self.full_model),
119
  AutoEvalColumn.dummy.name: self.full_model,
120
+ # AutoEvalColumn.revision.name: self.revision,
121
  AutoEvalColumn.average.name: average,
122
+ # AutoEvalColumn.license.name: self.license,
123
+ # AutoEvalColumn.likes.name: self.likes,
124
+ # AutoEvalColumn.params.name: self.num_params,
125
+ # AutoEvalColumn.still_on_hub.name: self.still_on_hub,
126
  }
127
 
128
  for task in Tasks:
 
156
  def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
157
  """From the path of the results folder root, extract all needed info for results"""
158
  model_result_filepaths = []
159
+
160
  for root, _, files in os.walk(results_path):
161
+ print(files)
162
  # We should only have json files in model results
163
  if len(files) == 0 or any([not f.endswith(".json") for f in files]):
164
  continue
 
186
  eval_results[eval_name] = eval_result
187
 
188
  results = []
189
+ # print(eval_results)
190
  for v in eval_results.values():
191
  try:
192
  v.to_dict() # we test if the dict version is complete
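With the Hub lookups commented out, `EvalResult.init_from_json_file` only depends on fields it can read straight from each results file: the `config` block (the diff shows `model_dtype` and `model_sha` being read here, and `model_name` being read by `upload_file` in submit.py below) plus one score per benchmark. A hypothetical results file satisfying those reads, with placeholder task and metric keys since the real ones come from `Tasks`, could be generated like this:

```python
import json

# Hypothetical example only: keys mirror what the diff actually reads
# (config.model_dtype, config.model_sha, config.model_name) plus per-task scores.
example_result = {
    "config": {
        "model_dtype": "torch.float16",   # parsed by Precision.from_str
        "model_sha": "main",              # stored as the revision
        "model_name": "my-org/my-model",  # used by upload_file to name the results folder
    },
    "results": {
        "my_task": {"my_metric": 0.5},    # placeholder benchmark key and metric
    },
}

with open("results_2024-01-01T00-00-00.json", "w") as f:
    json.dump(example_result, f, indent=2)
```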
src/populate.py CHANGED
@@ -10,10 +10,13 @@ from src.leaderboard.read_evals import get_raw_eval_results
10
 
11
  def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list) -> pd.DataFrame:
12
  raw_data = get_raw_eval_results(results_path, requests_path)
 
13
  all_data_json = [v.to_dict() for v in raw_data]
14
 
15
  df = pd.DataFrame.from_records(all_data_json)
16
  df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
 
 
17
  df = df[cols].round(decimals=2)
18
 
19
  # filter out if any of the benchmarks have not been produced
 
10
 
11
  def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list) -> pd.DataFrame:
12
  raw_data = get_raw_eval_results(results_path, requests_path)
13
+
14
  all_data_json = [v.to_dict() for v in raw_data]
15
 
16
  df = pd.DataFrame.from_records(all_data_json)
17
  df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
18
+ print(df)
19
+ print(cols)
20
  df = df[cols].round(decimals=2)
21
 
22
  # filter out if any of the benchmarks have not been produced
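The two `print` calls added to get_leaderboard_df look like temporary debugging for the column pruning above: after the utils.py change, every name in `cols` must still exist in the DataFrame built from `to_dict()`, otherwise `df[cols]` raises a KeyError. If that guard is the intent, a quieter and more defensive variant, sketched here as a suggestion rather than the repo's actual code, would be:

```python
import logging

logger = logging.getLogger(__name__)

def select_existing_columns(df, cols):
    """Keep only requested columns that are actually present, logging any that are missing."""
    missing = [c for c in cols if c not in df.columns]
    if missing:
        logger.warning("Dropping columns absent from results: %s", missing)
    return df[[c for c in cols if c in df.columns]].round(decimals=2)
```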
src/submission/submit.py CHANGED
@@ -3,7 +3,7 @@ import os
3
  from datetime import datetime, timezone
4
 
5
  from src.display.formatting import styled_error, styled_message, styled_warning
6
- from src.envs import API, EVAL_REQUESTS_PATH, TOKEN, QUEUE_REPO
7
  from src.submission.check_validity import (
8
  already_submitted_models,
9
  check_model_card,
@@ -14,6 +14,32 @@ from src.submission.check_validity import (
14
  REQUESTED_MODELS = None
15
  USERS_TO_SUBMISSION_DATES = None
16

17
  def add_new_eval(
18
  model: str,
19
  base_model: str,
 
3
  from datetime import datetime, timezone
4
 
5
  from src.display.formatting import styled_error, styled_message, styled_warning
6
+ from src.envs import API, EVAL_REQUESTS_PATH, TOKEN, QUEUE_REPO, EVAL_RESULTS_PATH
7
  from src.submission.check_validity import (
8
  already_submitted_models,
9
  check_model_card,
 
14
  REQUESTED_MODELS = None
15
  USERS_TO_SUBMISSION_DATES = None
16
 
17
+ def assert_upload(file_obj):
18
+ #TODO: assert the acc of file
19
+ return True
20
+ pass
21
+ def upload_file(file_obj):
22
+ flag = assert_upload(file_obj)
23
+
24
+
25
+ now = datetime.now()
26
+ timestamp_str = now.strftime("%Y-%m-%dT%H-%M-%S")
27
+ output_file = "results_"+timestamp_str+".json"
28
+
29
+ example = json.load(open(file_obj, "r"))
30
+ model_name = example["config"]["model_name"]
31
+ output_dir = os.path.join(EVAL_RESULTS_PATH,model_name)
32
+ os.makedirs(output_dir, exist_ok=True)
33
+
34
+ output_path = os.path.join(output_dir, output_file)
35
+
36
+ with open(file_obj, "r") as f:
37
+ content = f.read()
38
+ with open(output_path, "w") as f:
39
+ f.write(content)
40
+
41
+
42
+
43
  def add_new_eval(
44
  model: str,
45
  base_model: str,
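The new `upload_file` path above writes whatever file the user provides straight into `EVAL_RESULTS_PATH`, and `assert_upload` is still a TODO that always returns True. One possible direction for that TODO, sketched under the assumption that submissions are JSON files shaped like the results read by read_evals.py (names and checks below are illustrative, not the repo's implementation), is to validate the payload before copying it:

```python
import json
import os
from datetime import datetime

EVAL_RESULTS_PATH = "eval-results"  # stand-in for the value imported from src.envs

def assert_upload(file_obj) -> bool:
    """Hypothetical validation: reject anything that is not a results-style JSON."""
    try:
        with open(file_obj, "r") as f:
            data = json.load(f)
    except (OSError, json.JSONDecodeError):
        return False
    config = data.get("config", {})
    # model_name is required because upload_file uses it to build the output folder.
    return isinstance(config.get("model_name"), str) and bool(config.get("model_name"))

def upload_file(file_obj):
    if not assert_upload(file_obj):
        return "Submission rejected: expected a results JSON with config.model_name."
    with open(file_obj, "r") as f:
        example = json.load(f)
    model_name = example["config"]["model_name"]
    output_dir = os.path.join(EVAL_RESULTS_PATH, model_name)
    os.makedirs(output_dir, exist_ok=True)
    timestamp = datetime.now().strftime("%Y-%m-%dT%H-%M-%S")
    output_path = os.path.join(output_dir, f"results_{timestamp}.json")
    with open(file_obj, "r") as src, open(output_path, "w") as dst:
        dst.write(src.read())
    return f"Saved submission to {output_path}"
```

Since the commit wires the upload with `outputs=None`, the returned message would only surface if the `gr.Interface` were given an output component (for example `outputs="text"`).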