[email protected]
commited on
Commit
·
cba6080
1
Parent(s):
993a3d2
update
Browse files- app.py +6 -0
- src/about.py +12 -0
- src/display/utils.py +29 -20
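This commit adds a second, asset-level leaderboard: src/about.py gains an AssetTasks enum of per-asset accuracy metrics, src/display/utils.py factors the column construction into a reusable get_auto_eval_column_dict(task_class) helper and derives AutoEvalColumnAsset/ASSET_COLS from it, and app.py uses the new column set to build ASSET_LEADERBOARD_DF alongside the existing LEADERBOARD_DF.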
app.py CHANGED

@@ -16,6 +16,7 @@ from src.display.css_html_js import custom_css
 from src.display.utils import (
     BENCHMARK_COLS,
     COLS,
+    ASSET_COLS,
     EVAL_COLS,
     EVAL_TYPES,
     AutoEvalColumn,
@@ -52,6 +53,11 @@ except Exception:
 
 LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
 
+print(ASSET_COLS)
+
+ASSET_LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, ASSET_COLS, BENCHMARK_COLS)
+
+
 (
     finished_eval_queue_df,
     running_eval_queue_df,
src/about.py CHANGED

@@ -19,6 +19,18 @@ class Tasks(Enum):
     task4 = Task("score_consistency", "consist_score", "Consistency_Score")
     task5 = Task("uncertainty", "uncertainty_score", "Uncertainty_Score")
 
+class AssetTasks(Enum):
+    # task_key in the json file, metric_key in the json file, name to display in the leaderboard
+    task0 = Task("acc_electric_motor", "acc_electric_motor", "acc_electric_motor")
+    task1 = Task("acc_steam_turbine", "acc_steam_turbine", "acc_steam_turbine")
+    task2 = Task("acc_aero_gas_turbine", "acc_aero_gas_turbine", "acc_aero_gas_turbine")
+    task3 = Task("acc_industrial_gas_turbine", "acc_industrial_gas_turbine", "acc_industrial_gas_turbine")
+    task4 = Task("acc_pump", "acc_pump", "acc_pump")
+    task5 = Task("acc_compressor", "acc_compressor", "acc_compressor")
+    task6 = Task("acc_reciprocating_internal_combustion_engine", "acc_reciprocating_internal_combustion_engine", "acc_reciprocating_internal_combustion_engine")
+    task7 = Task("acc_electric_generator", "acc_electric_generator", "acc_electric_generator")
+    task8 = Task("acc_fan", "acc_fan", "acc_fan")
+    task9 = Task("acc_power_transformer", "acc_power_transformer", "acc_power_transformer")
 
 NUM_FEWSHOT = 0 # Change with your few shot
 # ---------------------------------------------------
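Each enum member wraps a Task triple: the task key in the results JSON, the metric key, and the display column name (here all three are identical per asset class). The Task type itself is not shown in this diff; in the standard Hugging Face leaderboard template it is a simple dataclass along these lines, so treat this as an assumption:

from dataclasses import dataclass

# Assumed definition (leaderboard template convention), matching the
# comment in the diff: task key, metric key, display column name.
@dataclass
class Task:
    benchmark: str  # task_key in the results json
    metric: str     # metric_key in the results json
    col_name: str   # column name shown on the leaderboard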
src/display/utils.py CHANGED

@@ -3,7 +3,7 @@ from enum import Enum
 
 import pandas as pd
 
-from src.about import Tasks
+from src.about import Tasks, AssetTasks
 
 def fields(raw_class):
     return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]
@@ -21,28 +21,36 @@ class ColumnContent:
     never_hidden: bool = False
 
 ## Leaderboard columns
-auto_eval_column_dict = []
-# Init
-auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)])
-auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
-#Scores
-auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)])
-for task in Tasks:
-    auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)])
-# Model information
-auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)])
-auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)])
-auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, True)])
-auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False)])
-auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False)])
-auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)])
-auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
-auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
-auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])
-
+def get_auto_eval_column_dict(task_class):
+    auto_eval_column_dict = []
+    # Init
+    auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)])
+    auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
+    #Scores
+    auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)])
+    for task in task_class:
+        auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)])
+    # Model information
+    auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)])
+    auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)])
+    auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, True)])
+    auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False)])
+    auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False)])
+    auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)])
+    auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
+    auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
+    auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])
+    return auto_eval_column_dict
+
+auto_eval_column_dict = get_auto_eval_column_dict(Tasks)
 # We use make dataclass to dynamically fill the scores from Tasks
 AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
 
+auto_eval_column_asset_dict = get_auto_eval_column_dict(AssetTasks)
+# We use make dataclass to dynamically fill the scores from Tasks
+AutoEvalColumnAsset = make_dataclass("AutoEvalColumnAsset", auto_eval_column_asset_dict, frozen=True)
+
+
 ## For the queue columns in the submission tab
 @dataclass(frozen=True)
 class EvalQueueColumn: # Queue column
@@ -102,6 +110,7 @@ class Precision(Enum):
 
 # Column selection
 COLS = [c.name for c in fields(AutoEvalColumn) if not c.hidden]
+ASSET_COLS = [c.name for c in fields(AutoEvalColumnAsset) if not c.hidden]
 
 EVAL_COLS = [c.name for c in fields(EvalQueueColumn)]
 EVAL_TYPES = [c.type for c in fields(EvalQueueColumn)]