refactor: refactor the codes
- app.py +3 -2
- src/display/columns.py +3 -6
- src/display/gradio_listener.py +0 -53
- src/utils.py +38 -5
- tests/src/display/test_utils.py +1 -2
app.py
CHANGED
@@ -19,7 +19,8 @@ from src.loaders import (
     load_eval_results
 )
 from src.utils import (
-    update_metric
+    update_metric,
+    set_listeners
 )
 from src.display.gradio_formatting import (
     get_version_dropdown,
@@ -32,7 +33,7 @@ from src.display.gradio_formatting import (
     get_revision_and_ts_checkbox,
     get_leaderboard_table
 )
-from src.display.gradio_listener import set_listeners
+
 
 def restart_space():
     API.restart_space(repo_id=REPO_ID)
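Note: with this change app.py imports set_listeners from src.utils instead of from the deleted src/display/gradio_listener.py. Below is a minimal sketch of how such a helper is wired inside a Gradio Blocks layout; the component names and choices are illustrative assumptions, not the app's actual definitions.

import gradio as gr
from src.utils import set_listeners

with gr.Blocks() as demo:
    search_bar = gr.Textbox(label="Search")
    selected_domains = gr.CheckboxGroup(choices=["wiki", "web"], label="Domain")
    selected_langs = gr.CheckboxGroup(choices=["en", "zh"], label="Language")
    selected_rerankings = gr.CheckboxGroup(choices=["NoReranker"], label="Reranker")
    show_anonymous = gr.Checkbox(label="Show anonymous submissions")
    show_revision_and_timestamp = gr.Checkbox(label="Show revision and timestamp")
    displayed_table = gr.Dataframe()             # table the user sees
    hidden_table = gr.Dataframe(visible=False)   # full data the filters run against

    # One call installs the submit/change listeners for a tab.
    set_listeners(
        "qa",
        displayed_table,   # target_df
        hidden_table,      # source_df
        search_bar,
        selected_domains,
        selected_langs,
        selected_rerankings,
        show_anonymous,
        show_revision_and_timestamp,
    )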
src/display/columns.py
CHANGED
@@ -66,7 +66,7 @@ def get_default_auto_eval_column_dict():
     return auto_eval_column_dict
 
 
-def make_autoevalcolumn(cls_name="QABenchmarks", benchmarks=QABenchmarks):
+def make_autoevalcolumn(cls_name, benchmarks):
     auto_eval_column_dict = get_default_auto_eval_column_dict()
     # Leaderboard columns
     for benchmark in benchmarks:
@@ -78,10 +78,8 @@ def make_autoevalcolumn(cls_name="QABenchmarks", benchmarks=QABenchmarks):
     return make_dataclass(cls_name, auto_eval_column_dict, frozen=True)
 
 
-AutoEvalColumnQA = make_autoevalcolumn(
-    "AutoEvalColumnQA", QABenchmarks)
-AutoEvalColumnLongDoc = make_autoevalcolumn(
-    "AutoEvalColumnLongDoc", LongDocBenchmarks)
+AutoEvalColumnQA = make_autoevalcolumn("AutoEvalColumnQA", QABenchmarks)
+AutoEvalColumnLongDoc = make_autoevalcolumn("AutoEvalColumnLongDoc", LongDocBenchmarks)
 
 fixed_cols = get_default_auto_eval_column_dict()[:-3]
 
@@ -93,4 +91,3 @@ COLS_QA = [c.name for c in fields(AutoEvalColumnQA) if not c.hidden]
 COLS_LONG_DOC = [c.name for c in fields(AutoEvalColumnLongDoc) if not c.hidden]
 TYPES_QA = [c.type for c in fields(AutoEvalColumnQA) if not c.hidden]
 TYPES_LONG_DOC = [c.type for c in fields(AutoEvalColumnLongDoc) if not c.hidden]
-COLS_LITE = [c.name for c in fields(AutoEvalColumnQA) if c.displayed_by_default and not c.hidden]
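Note: make_autoevalcolumn builds a frozen dataclass at runtime via dataclasses.make_dataclass, one attribute per benchmark. A self-contained sketch of that pattern follows; ColumnContent and the benchmark names here are stand-ins, not the repo's actual definitions.

from dataclasses import dataclass, fields, make_dataclass

@dataclass(frozen=True)
class ColumnContent:
    name: str
    type: str
    displayed_by_default: bool = True
    hidden: bool = False

def make_column_class(cls_name, benchmark_names):
    # Each entry is (attribute_name, attribute_type, default_value).
    column_dict = [("rank", ColumnContent, ColumnContent("Rank", "number"))]
    for bench in benchmark_names:
        column_dict.append((bench, ColumnContent, ColumnContent(bench, "number")))
    return make_dataclass(cls_name, column_dict, frozen=True)

Cols = make_column_class("Cols", ["nq", "msmarco"])
print([f.name for f in fields(Cols)])  # ['rank', 'nq', 'msmarco']

Requiring explicit cls_name and benchmarks arguments (instead of the old QABenchmarks defaults) makes the two call sites read symmetrically and keeps the QA coupling out of the function signature.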
src/display/gradio_listener.py
DELETED
@@ -1,53 +0,0 @@
-from src.utils import update_table, update_table_long_doc
-
-
-def set_listeners(
-    task,
-    displayed_leaderboard,
-    hidden_leaderboard,
-    search_bar,
-    selected_domains,
-    selected_langs,
-    selected_rerankings,
-    show_anonymous,
-    show_revision_and_timestamp,
-
-):
-    if task == "qa":
-        update_table_func = update_table
-    elif task == "long-doc":
-        update_table_func = update_table_long_doc
-    else:
-        raise NotImplementedError
-    # Set search_bar listener
-    search_bar.submit(
-        update_table_func,
-        [
-            hidden_leaderboard,  # hidden_leaderboard_table_for_search,
-            selected_domains,
-            selected_langs,
-            selected_rerankings,
-            search_bar,
-            show_anonymous,
-        ],
-        displayed_leaderboard
-    )
-
-    # Set column-wise listener
-    for selector in [
-        selected_domains, selected_langs, show_anonymous, show_revision_and_timestamp, selected_rerankings
-    ]:
-        selector.change(
-            update_table_func,
-            [
-                hidden_leaderboard,
-                selected_domains,
-                selected_langs,
-                selected_rerankings,
-                search_bar,
-                show_anonymous,
-                show_revision_and_timestamp
-            ],
-            displayed_leaderboard,
-            queue=True,
-        )
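Note: the deleted helper is re-created in src/utils.py below; folding it in removes a small module that existed only to combine src.utils functions with Gradio event wiring. For reference, the Gradio event-listener shape it builds on is listener(fn, inputs, outputs); a toy, self-contained example:

import gradio as gr

def filter_lines(source_text, query):
    # Toy stand-in for update_table: keep only lines containing the query.
    return "\n".join(line for line in source_text.splitlines() if query in line)

with gr.Blocks() as demo:
    source = gr.Textbox(value="alpha\nbeta\ngamma", visible=False)
    query = gr.Textbox(label="Filter")
    shown = gr.Textbox(label="Result")
    # Same shape as in set_listeners: fn, list of input components, output.
    query.submit(filter_lines, [source, query], shown)
    query.change(filter_lines, [source, query], shown, queue=True)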
src/utils.py
CHANGED
@@ -96,9 +96,6 @@ def get_default_cols(task: str, columns: list=[], add_fix_cols: bool=True) -> list:
     return cols, types
 
 
-
-
-
 def select_columns(
     df: pd.DataFrame,
     domain_query: list,
@@ -162,7 +159,8 @@ def update_table(
     reset_ranking: bool = True
 ):
     return _update_table(
-        "qa", hidden_df, domains, langs, reranking_query, query, show_anonymous, reset_ranking, show_revision_and_timestamp)
+        "qa",
+        hidden_df, domains, langs, reranking_query, query, show_anonymous, reset_ranking, show_revision_and_timestamp)
 
 
 def update_table_long_doc(
@@ -177,7 +175,8 @@ def update_table_long_doc(
 
 ):
     return _update_table(
-        "long-doc", hidden_df, domains, langs, reranking_query, query, show_anonymous, reset_ranking, show_revision_and_timestamp)
+        "long-doc",
+        hidden_df, domains, langs, reranking_query, query, show_anonymous, reset_ranking, show_revision_and_timestamp)
 
 
 def update_metric(
@@ -360,3 +359,37 @@ def get_leaderboard_df(raw_data: List[FullEvalResult], task: str, metric: str) -> pd.DataFrame:
     # # replace "0" with "-" for average score
     # df[COL_NAME_AVG] = df[COL_NAME_AVG].replace(0, "-")
     return df
+
+
+def set_listeners(
+    task,
+    target_df,
+    source_df,
+    search_bar,
+    selected_domains,
+    selected_langs,
+    selected_rerankings,
+    show_anonymous,
+    show_revision_and_timestamp,
+):
+    if task == "qa":
+        update_table_func = update_table
+    elif task == "long-doc":
+        update_table_func = update_table_long_doc
+    else:
+        raise NotImplementedError
+    selector_list = [
+        selected_domains,
+        selected_langs,
+        selected_rerankings,
+        search_bar,
+        show_anonymous
+    ]
+    search_bar_args = [source_df,] + selector_list
+    selector_args = search_bar_args + [show_revision_and_timestamp,]
+    # Set search_bar listener
+    search_bar.submit(update_table_func, search_bar_args, target_df)
+
+    # Set column-wise listener
+    for selector in selector_list:
+        selector.change(update_table_func, selector_args, target_df, queue=True,)
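Note: one subtlety in the consolidated helper is that search_bar.submit passes six inputs while the selector change listeners pass seven, so show_revision_and_timestamp presumably falls back to its default on a plain search submit and is only threaded through when a selector fires. A toy trace of the list arithmetic, with strings standing in for the Gradio components:

selector_list = ["selected_domains", "selected_langs", "selected_rerankings",
                 "search_bar", "show_anonymous"]
search_bar_args = ["source_df"] + selector_list                     # 6 inputs on submit
selector_args = search_bar_args + ["show_revision_and_timestamp"]   # 7 inputs on change
assert len(search_bar_args) == 6 and len(selector_args) == 7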
tests/src/display/test_utils.py
CHANGED
@@ -1,5 +1,5 @@
 import pytest
-from src.display.utils import fields, AutoEvalColumnQA, COLS_QA, COLS_LONG_DOC, COLS_LITE, TYPES_QA, TYPES_LONG_DOC, get_default_auto_eval_column_dict
+from src.display.utils import fields, AutoEvalColumnQA, COLS_QA, COLS_LONG_DOC, TYPES_QA, TYPES_LONG_DOC, get_default_auto_eval_column_dict
 
 
 def test_fields():
@@ -10,7 +10,6 @@ def test_fields():
 def test_macro_variables():
     print(f'COLS_QA: {COLS_QA}')
     print(f'COLS_LONG_DOC: {COLS_LONG_DOC}')
-    print(f'COLS_LITE: {COLS_LITE}')
     print(f'TYPES_QA: {TYPES_QA}')
     print(f'TYPES_LONG_DOC: {TYPES_LONG_DOC}')
 
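Note: with COLS_LITE gone, the test imports match what the display module still exports. A quick smoke check mirroring test_macro_variables; the pytest invocation assumes the repo's tests/ layout:

# Run with: python -m pytest tests/src/display/test_utils.py -s
from src.display.utils import COLS_QA, COLS_LONG_DOC, TYPES_QA, TYPES_LONG_DOC

for name, value in [("COLS_QA", COLS_QA), ("COLS_LONG_DOC", COLS_LONG_DOC),
                    ("TYPES_QA", TYPES_QA), ("TYPES_LONG_DOC", TYPES_LONG_DOC)]:
    print(f"{name}: {value}")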