Commit a9a4909 · xeon27 committed
1 Parent(s): ba2f546

Remove filters and extra columns

Files changed:
- app.py (+111 -110)
- src/display/utils.py (+11 -11)
app.py CHANGED

@@ -70,20 +70,21 @@ def init_leaderboard(dataframe):
         ),
         search_columns=[AutoEvalColumn.model.name, AutoEvalColumn.license.name],
         hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
-        filter_columns=[
-            ColumnFilter(AutoEvalColumn.model_type.name, type="checkboxgroup", label="Model types"),
-            ColumnFilter(AutoEvalColumn.precision.name, type="checkboxgroup", label="Precision"),
-            ColumnFilter(
-                AutoEvalColumn.params.name,
-                type="slider",
-                min=0.01,
-                max=150,
-                label="Select the number of parameters (B)",
-            ),
-            ColumnFilter(
-                AutoEvalColumn.still_on_hub.name, type="boolean", label="Deleted/incomplete", default=False
-            ),
-        ],
+        # filter_columns=[
+        #     ColumnFilter(AutoEvalColumn.model_type.name, type="checkboxgroup", label="Model types"),
+        #     ColumnFilter(AutoEvalColumn.precision.name, type="checkboxgroup", label="Precision"),
+        #     ColumnFilter(
+        #         AutoEvalColumn.params.name,
+        #         type="slider",
+        #         min=0.01,
+        #         max=150,
+        #         label="Select the number of parameters (B)",
+        #     ),
+        #     ColumnFilter(
+        #         AutoEvalColumn.still_on_hub.name, type="boolean", label="Deleted/incomplete", default=False
+        #     ),
+        # ],
+        filter_columns=[],
         bool_checkboxgroup_label="Hide models",
         interactive=False,
     )

@@ -101,102 +102,102 @@ with demo:
         with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
             gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
 
-        with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
-            with gr.Column():
-                with gr.Row():
-                    gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
-
-                with gr.Column():
-                    with gr.Accordion(
-                        f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            finished_eval_table = gr.components.Dataframe(
-                                value=finished_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-                    with gr.Accordion(
-                        f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            running_eval_table = gr.components.Dataframe(
-                                value=running_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-
-                    with gr.Accordion(
-                        f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            pending_eval_table = gr.components.Dataframe(
-                                value=pending_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-            with gr.Row():
-                gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
-
-            with gr.Row():
-                with gr.Column():
-                    model_name_textbox = gr.Textbox(label="Model name")
-                    revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
-                    model_type = gr.Dropdown(
-                        choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
-                        label="Model type",
-                        multiselect=False,
-                        value=None,
-                        interactive=True,
-                    )
-
-                with gr.Column():
-                    precision = gr.Dropdown(
-                        choices=[i.value.name for i in Precision if i != Precision.Unknown],
-                        label="Precision",
-                        multiselect=False,
-                        value="float16",
-                        interactive=True,
-                    )
-                    weight_type = gr.Dropdown(
-                        choices=[i.value.name for i in WeightType],
-                        label="Weights type",
-                        multiselect=False,
-                        value="Original",
-                        interactive=True,
-                    )
-                    base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
-
-            submit_button = gr.Button("Submit Eval")
-            submission_result = gr.Markdown()
-            submit_button.click(
-                add_new_eval,
-                [
-                    model_name_textbox,
-                    base_model_name_textbox,
-                    revision_name_textbox,
-                    precision,
-                    weight_type,
-                    model_type,
-                ],
-                submission_result,
-            )
-
-    with gr.Row():
-        with gr.Accordion("📙 Citation", open=False):
-            citation_button = gr.Textbox(
-                value=CITATION_BUTTON_TEXT,
-                label=CITATION_BUTTON_LABEL,
-                lines=20,
-                elem_id="citation-button",
-                show_copy_button=True,
-            )
+        # with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
+        #     with gr.Column():
+        #         with gr.Row():
+        #             gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
+
+        #         with gr.Column():
+        #             with gr.Accordion(
+        #                 f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
+        #                 open=False,
+        #             ):
+        #                 with gr.Row():
+        #                     finished_eval_table = gr.components.Dataframe(
+        #                         value=finished_eval_queue_df,
+        #                         headers=EVAL_COLS,
+        #                         datatype=EVAL_TYPES,
+        #                         row_count=5,
+        #                     )
+        #             with gr.Accordion(
+        #                 f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
+        #                 open=False,
+        #             ):
+        #                 with gr.Row():
+        #                     running_eval_table = gr.components.Dataframe(
+        #                         value=running_eval_queue_df,
+        #                         headers=EVAL_COLS,
+        #                         datatype=EVAL_TYPES,
+        #                         row_count=5,
+        #                     )
+
+        #             with gr.Accordion(
+        #                 f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
+        #                 open=False,
+        #             ):
+        #                 with gr.Row():
+        #                     pending_eval_table = gr.components.Dataframe(
+        #                         value=pending_eval_queue_df,
+        #                         headers=EVAL_COLS,
+        #                         datatype=EVAL_TYPES,
+        #                         row_count=5,
+        #                     )
+        #     with gr.Row():
+        #         gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
+
+        #     with gr.Row():
+        #         with gr.Column():
+        #             model_name_textbox = gr.Textbox(label="Model name")
+        #             revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
+        #             model_type = gr.Dropdown(
+        #                 choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
+        #                 label="Model type",
+        #                 multiselect=False,
+        #                 value=None,
+        #                 interactive=True,
+        #             )
+
+        #         with gr.Column():
+        #             precision = gr.Dropdown(
+        #                 choices=[i.value.name for i in Precision if i != Precision.Unknown],
+        #                 label="Precision",
+        #                 multiselect=False,
+        #                 value="float16",
+        #                 interactive=True,
+        #             )
+        #             weight_type = gr.Dropdown(
+        #                 choices=[i.value.name for i in WeightType],
+        #                 label="Weights type",
+        #                 multiselect=False,
+        #                 value="Original",
+        #                 interactive=True,
+        #             )
+        #             base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
+
+        #     submit_button = gr.Button("Submit Eval")
+        #     submission_result = gr.Markdown()
+        #     submit_button.click(
+        #         add_new_eval,
+        #         [
+        #             model_name_textbox,
+        #             base_model_name_textbox,
+        #             revision_name_textbox,
+        #             precision,
+        #             weight_type,
+        #             model_type,
+        #         ],
+        #         submission_result,
+        #     )
+
+    # with gr.Row():
+    #     with gr.Accordion("📙 Citation", open=False):
+    #         citation_button = gr.Textbox(
+    #             value=CITATION_BUTTON_TEXT,
+    #             label=CITATION_BUTTON_LABEL,
+    #             lines=20,
+    #             elem_id="citation-button",
+    #             show_copy_button=True,
+    #         )
 
 scheduler = BackgroundScheduler()
 scheduler.add_job(restart_space, "interval", seconds=1800)
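The net effect in app.py is that the Leaderboard component is now built with `filter_columns=[]`, so none of the former filter widgets (model type, precision, parameter-count slider, hub-availability checkbox) are rendered. A minimal runnable sketch of that configuration, assuming the `gradio` and `gradio_leaderboard` packages are installed; the toy DataFrame and its column names are placeholders, not the app's real data:

import gradio as gr
import pandas as pd
from gradio_leaderboard import Leaderboard

# Toy stand-in for LEADERBOARD_DF; the real app builds this from eval results.
df = pd.DataFrame({"Model": ["model-a", "model-b"], "Average ⬆️": [71.2, 68.9]})

with gr.Blocks() as demo:
    Leaderboard(
        value=df,
        search_columns=["Model"],  # searchable columns, as in the diff context
        filter_columns=[],         # empty list, as of this commit: no filter UI
        interactive=False,         # table is read-only
    )

if __name__ == "__main__":
    demo.launch()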
src/display/utils.py CHANGED

@@ -23,22 +23,22 @@ class ColumnContent:
 ## Leaderboard columns
 auto_eval_column_dict = []
 # Init
-auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)])
+# auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)])
 auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
 #Scores
 auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)])
 for task in Tasks:
     auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)])
-# Model information
-auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)])
-auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)])
-auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, True)])
-auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False)])
-auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False)])
-auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)])
-auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
-auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
-auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])
+# # Model information
+# auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)])
+# auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)])
+# auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, True)])
+# auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False)])
+# auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False)])
+# auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)])
+# auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
+# auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
+# auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])
 
 # We use make dataclass to dynamically fill the scores from Tasks
 AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
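In src/display/utils.py, the surviving `[attribute_name, annotation, default]` triples are still turned into the `AutoEvalColumn` class by `make_dataclass`, so commenting an entry out removes that attribute from the class entirely. A minimal self-contained sketch of the pattern, with a stand-in `ColumnContent` (the real dataclass is defined earlier in the same file):

from dataclasses import dataclass, make_dataclass

@dataclass(frozen=True)
class ColumnContent:
    name: str                    # header shown in the table
    type: str                    # gradio datatype: "str", "markdown", "number", "bool"
    displayed_by_default: bool
    hidden: bool = False
    never_hidden: bool = False

# Each [attribute_name, annotation, default] triple becomes one class
# attribute whose value is the ColumnContent instance passed as default.
auto_eval_column_dict = [
    ["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)],
    ["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)],
]
AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)

print(AutoEvalColumn.model.name)    # -> "Model"
print(AutoEvalColumn.average.type)  # -> "number"

This is also why references such as `AutoEvalColumn.license.name` elsewhere in the code (for example in app.py's `search_columns`) must stay in sync with the entries left uncommented here.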