Paul Hager committed · 0e12557
Parent(s): dd4dcf8

Adjusted the visible configs

Files changed:
- src/display/utils.py (+8 -6)
- src/leaderboard/read_evals.py (+13 -4)
src/display/utils.py
CHANGED
@@ -23,20 +23,22 @@ class ColumnContent:
 ## Leaderboard columns
 auto_eval_column_dict = []
 # Init
-auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)])
+# auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)])
 auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
 #Scores
 auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)])
 for task in Tasks:
     auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)])
 # Model information
-auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)])
+# auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)])
 auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)])
-auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, True)])
-auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False)])
-auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False)])
+# auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, True)])
+# auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False)])
+# auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False)])
 auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)])
-auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
+auto_eval_column_dict.append(["seq_length", ColumnContent, ColumnContent("Max Sequence Length", "number", False)])
+auto_eval_column_dict.append(["model_quantization_bits", ColumnContent, ColumnContent("Quantization Bits", "number", False)])
+# auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
 auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
 auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])

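The new "Max Sequence Length" and "Quantization Bits" columns follow the same pattern as the existing entries: each item appended to auto_eval_column_dict pairs an attribute name with a ColumnContent default. Below is a minimal sketch of how such a list is typically turned into the AutoEvalColumn dataclass used by the rest of the code; the ColumnContent field order (name, type, displayed_by_default, hidden, never_hidden) and the make_dataclass step are assumptions taken from the standard leaderboard template, not shown in this diff.

# Sketch only: ColumnContent's signature and the make_dataclass step are assumed
# from the standard leaderboard template; the entries mirror a few lines of the
# new src/display/utils.py, not the whole file.
from dataclasses import dataclass, fields, make_dataclass

@dataclass
class ColumnContent:
    name: str
    type: str
    displayed_by_default: bool
    hidden: bool = False
    never_hidden: bool = False

auto_eval_column_dict = []
auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)])
# The two columns added by this commit:
auto_eval_column_dict.append(["seq_length", ColumnContent, ColumnContent("Max Sequence Length", "number", False)])
auto_eval_column_dict.append(["model_quantization_bits", ColumnContent, ColumnContent("Quantization Bits", "number", False)])

# Each [attribute_name, type, default] triple becomes a field of a frozen dataclass,
# so columns can be referenced as AutoEvalColumn.seq_length.name, etc.
AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)

print(AutoEvalColumn.seq_length.name)           # "Max Sequence Length"
print([f.default.name for f in fields(AutoEvalColumn)
       if f.default.displayed_by_default])      # only "Model" in this sketch

With the dataclass in place, read_evals.py can address the new columns by display name (AutoEvalColumn.seq_length.name) when it builds table rows, which is what the second file in this commit does.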
src/leaderboard/read_evals.py
CHANGED
@@ -29,6 +29,8 @@ class EvalResult:
     license: str = "?"
     likes: int = 0
     num_params: int = 0
+    seq_length: int = 0
+    model_quantization_bits: int = 0
     date: str = ""  # submission date of request file
     still_on_hub: bool = False

@@ -79,6 +81,11 @@ class EvalResult:
             mean_acc = np.mean(accs) * 100.0
             results[task.benchmark] = mean_acc

+        self.params = config.get("params", 0)
+        self.seq_length = config.get("max_sequence_length", 0)
+        self.model_quantization_bits = config.get("model_quantization_bits", 0)
+
+
         return self(
             eval_name=result_key,
             full_model=full_model,
@@ -102,7 +109,7 @@ class EvalResult:
             self.weight_type = WeightType[request.get("weight_type", "Original")]
             self.license = request.get("license", "?")
             self.likes = request.get("likes", 0)
-            self.num_params = request.get("params", 0)
+            self.params = request.get("params", 0)
             self.date = request.get("submitted_time", "")
         except Exception:
             print(
@@ -122,9 +129,11 @@ class EvalResult:
             AutoEvalColumn.model.name: make_clickable_model(self.full_model),
             AutoEvalColumn.revision.name: self.revision,
             AutoEvalColumn.average.name: average,
-            AutoEvalColumn.license.name: self.license,
-            AutoEvalColumn.likes.name: self.likes,
-            AutoEvalColumn.params.name: self.num_params,
+            # AutoEvalColumn.license.name: self.license,
+            # AutoEvalColumn.likes.name: self.likes,
+            AutoEvalColumn.params.name: self.params,
+            AutoEvalColumn.seq_length.name: self.seq_length,
+            AutoEvalColumn.model_quantization_bits.name: self.model_quantization_bits,
             AutoEvalColumn.still_on_hub.name: self.still_on_hub,
         }

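To see how the new fields travel end to end, the sketch below mimics the flow this commit sets up: values are read from a results file's config block with the same .get(...) keys and 0-defaults as the diff above, stored on the result object, and emitted in a row keyed by the column display names. The file layout, class name, and the "model_name" key are illustrative assumptions; only the config keys, defaults, and display names come from the diff.

# Illustrative sketch, not the project's EvalResult: it keeps only the fields this
# commit touches and reuses the config keys and 0-defaults shown in the diff above.
import json
from dataclasses import dataclass


@dataclass
class EvalResultSketch:
    full_model: str
    params: int = 0
    seq_length: int = 0
    model_quantization_bits: int = 0

    @classmethod
    def from_results_file(cls, path: str) -> "EvalResultSketch":
        with open(path) as f:
            data = json.load(f)
        config = data.get("config", {})
        return cls(
            full_model=config.get("model_name", "unknown"),  # hypothetical key
            params=config.get("params", 0),
            seq_length=config.get("max_sequence_length", 0),
            model_quantization_bits=config.get("model_quantization_bits", 0),
        )

    def to_row(self) -> dict:
        # Keys match the display names of the ColumnContent entries in src/display/utils.py.
        return {
            "Model": self.full_model,
            "#Params (B)": self.params,
            "Max Sequence Length": self.seq_length,
            "Quantization Bits": self.model_quantization_bits,
        }


if __name__ == "__main__":
    # Hypothetical results file carrying the fields the leaderboard now displays.
    with open("results_demo.json", "w") as f:
        json.dump({"config": {"model_name": "org/model", "params": 7,
                              "max_sequence_length": 32768,
                              "model_quantization_bits": 4}}, f)
    print(EvalResultSketch.from_results_file("results_demo.json").to_row())
    # {'Model': 'org/model', '#Params (B)': 7, 'Max Sequence Length': 32768, 'Quantization Bits': 4}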