Space status: Runtime error

Commit · d7ea26f: Test commit
1 Parent(s): 03110b5

Files changed: src/display/utils.py (+10 -5)

src/display/utils.py (CHANGED)
@@ -1,10 +1,10 @@
 from dataclasses import dataclass, make_dataclass
 from enum import Enum
 
-import pandas as pd
 
 from src.about import Tasks
 
+
 def fields(raw_class):
     return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]
 
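Note: the fields helper in the hunk above is plain reflection over a class __dict__, keeping every attribute whose name is not dunder-style. A minimal sketch of the behaviour, using a hypothetical Dummy class that is not part of this repo:

def fields(raw_class):
    return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]

class Dummy:
    a = 1        # kept: no dunder prefix/suffix
    __b__ = 2    # dropped: dunder-style name
    label = "x"  # kept

print(fields(Dummy))  # -> [1, 'x']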
@@ -20,13 +20,14 @@ class ColumnContent:
     hidden: bool = False
     never_hidden: bool = False
 
+
 ## Leaderboard columns
 auto_eval_column_dict = []
 # Init
 auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)])
 auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
-#Scores
-auto_eval_column_dict.append(["
+# Scores
+auto_eval_column_dict.append(["mean_score", ColumnContent, ColumnContent("Average ⬆️", "number", True)])
 for task in Tasks:
     auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)])
 # Model information
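Note: the truncated line removed in this hunk (auto_eval_column_dict.append([") is quite likely what put the Space into its "Runtime error" state: an unterminated string literal raises SyntaxError as soon as src/display/utils.py is imported, before any app code runs. A sketch reproducing that failure mode (not the Space's actual traceback):

# The old, truncated statement cannot even be compiled.
try:
    compile('auto_eval_column_dict.append(["', "<utils.py>", "exec")
except SyntaxError as e:
    print(e.msg)  # e.g. 'unterminated string literal (detected at line 1)'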
@@ -43,6 +44,7 @@ auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sh
 # We use make dataclass to dynamically fill the scores from Tasks
 AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
 
+
 ## For the queue columns in the submission tab
 @dataclass(frozen=True)
 class EvalQueueColumn: # Queue column
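Note: each auto_eval_column_dict entry is a [field_name, type, default] triple, the three-element form accepted by make_dataclass; the default ColumnContent then becomes a class attribute of AutoEvalColumn. A self-contained sketch follows. ColumnContent's first three fields are not visible in this diff, so name/type/displayed_by_default below are assumptions read off the call sites:

from dataclasses import dataclass, make_dataclass

@dataclass(frozen=True)
class ColumnContent:
    name: str
    type: str
    displayed_by_default: bool  # assumed field name, not shown in the diff
    hidden: bool = False
    never_hidden: bool = False

entries = [
    ["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)],
    ["mean_score", ColumnContent, ColumnContent("Average ⬆️", "number", True)],
]
AutoEvalColumn = make_dataclass("AutoEvalColumn", entries, frozen=True)

# Defaults are exposed as class attributes, so columns can be read
# as AutoEvalColumn.<field> without ever instantiating the class.
print(AutoEvalColumn.mean_score.name)  # -> Average ⬆️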
@@ -53,12 +55,13 @@ class EvalQueueColumn: # Queue column
     weight_type = ColumnContent("weight_type", "str", "Original")
     status = ColumnContent("status", "str", True)
 
+
 ## All the model information that we might need
 @dataclass
 class ModelDetails:
     name: str
     display_name: str = ""
-    symbol: str = ""
+    symbol: str = "" # emoji
 
 
 class ModelType(Enum):
@@ -83,11 +86,13 @@ class ModelType(Enum):
             return ModelType.IFT
         return ModelType.Unknown
 
+
 class WeightType(Enum):
     Adapter = ModelDetails("Adapter")
     Original = ModelDetails("Original")
     Delta = ModelDetails("Delta")
 
+
 class Precision(Enum):
     float16 = ModelDetails("float16")
     bfloat16 = ModelDetails("bfloat16")
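Note: WeightType and Precision both use the dataclass-as-Enum-payload pattern: each member's .value is a ModelDetails carrying display metadata. A simplified, self-contained sketch:

from dataclasses import dataclass
from enum import Enum

@dataclass
class ModelDetails:
    name: str
    display_name: str = ""
    symbol: str = ""  # emoji

class WeightType(Enum):
    Adapter = ModelDetails("Adapter")
    Original = ModelDetails("Original")
    Delta = ModelDetails("Delta")

print(WeightType.Original.value.name)  # -> Original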
@@ -100,6 +105,7 @@ class Precision(Enum):
             return Precision.bfloat16
         return Precision.Unknown
 
+
 # Column selection
 COLS = [c.name for c in fields(AutoEvalColumn) if not c.hidden]
 
@@ -107,4 +113,3 @@ EVAL_COLS = [c.name for c in fields(EvalQueueColumn)]
 EVAL_TYPES = [c.type for c in fields(EvalQueueColumn)]
 
 BENCHMARK_COLS = [t.value.col_name for t in Tasks]
-
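Note: the module-level selections at the end (COLS, EVAL_COLS, EVAL_TYPES, BENCHMARK_COLS) are plain comprehensions over the fields helper, which works on any class whose columns live in the class __dict__. A small self-contained sketch with a two-column stand-in for EvalQueueColumn (values are illustrative only):

from dataclasses import dataclass

@dataclass(frozen=True)
class ColumnContent:
    name: str
    type: str
    displayed_by_default: bool
    hidden: bool = False
    never_hidden: bool = False

def fields(raw_class):
    return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]

class EvalQueueColumn:  # stand-in subset of the real queue columns
    model = ColumnContent("model", "markdown", True)
    status = ColumnContent("status", "str", True)

EVAL_COLS = [c.name for c in fields(EvalQueueColumn)]
EVAL_TYPES = [c.type for c in fields(EvalQueueColumn)]
print(EVAL_COLS, EVAL_TYPES)  # -> ['model', 'status'] ['markdown', 'str']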