Update app.py
app.py CHANGED
@@ -66,12 +66,12 @@ def init_leaderboard(dataframe):
         select_columns=SelectColumns(
             default_selection=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default],
             cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden],
-            label="
+            label="ディスプレイに表示する行を選択",
         ),
         search_columns=[AutoEvalColumn.model.name, AutoEvalColumn.license.name],
         hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
         filter_columns=[
-            ColumnFilter(AutoEvalColumn.model_type.name, type="checkboxgroup", label="
+            ColumnFilter(AutoEvalColumn.model_type.name, type="checkboxgroup", label="モデルタイプ"),
             ColumnFilter(AutoEvalColumn.precision.name, type="checkboxgroup", label="Precision"),
             ColumnFilter(
                 AutoEvalColumn.params.name,
@@ -84,7 +84,7 @@ def init_leaderboard(dataframe):
                 AutoEvalColumn.still_on_hub.name, type="boolean", label="Deleted/incomplete", default=True
             ),
         ],
-        bool_checkboxgroup_label="
+        bool_checkboxgroup_label="モデルを隠す",
         interactive=False,
     )
 
@@ -95,20 +95,20 @@ with demo:
     gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
 
     with gr.Tabs(elem_classes="tab-buttons") as tabs:
-        with gr.TabItem("🏅 LLM
+        with gr.TabItem("🏅 LLM ベンチマーク", elem_id="llm-benchmark-tab-table", id=0):
             leaderboard = init_leaderboard(LEADERBOARD_DF)
 
         with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
             gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
 
-        with gr.TabItem("🚀
+        with gr.TabItem("🚀 追加! ", elem_id="llm-benchmark-tab-table", id=3):
             with gr.Column():
                 with gr.Row():
                     gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
 
                 with gr.Column():
                     with gr.Accordion(
-                        f"✅
+                        f"✅ 評価が完了したモデル ({len(finished_eval_queue_df)})",
                         open=False,
                     ):
                         with gr.Row():
@@ -119,7 +119,7 @@ with demo:
                                 row_count=5,
                             )
                     with gr.Accordion(
-                        f"🔄
+                        f"🔄 評価実行中のモデル ({len(running_eval_queue_df)})",
                         open=False,
                     ):
                         with gr.Row():
@@ -142,15 +142,15 @@ with demo:
                                 row_count=5,
                             )
             with gr.Row():
-                gr.Markdown("# ✉️✨
+                gr.Markdown("# ✉️✨ ぜひモデルを追加ください！", elem_classes="markdown-text")
 
             with gr.Row():
                 with gr.Column():
-                    model_name_textbox = gr.Textbox(label="
-                    revision_name_textbox = gr.Textbox(label="
+                    model_name_textbox = gr.Textbox(label="モデルネーム")
+                    revision_name_textbox = gr.Textbox(label="コミット", placeholder="main")
                     model_type = gr.Dropdown(
                         choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
-                        label="
+                        label="モデルタイプ",
                         multiselect=False,
                         value=None,
                         interactive=True,
@@ -166,14 +166,14 @@ with demo:
                     )
                     weight_type = gr.Dropdown(
                         choices=[i.value.name for i in WeightType],
-                        label="
+                        label="ウェイトタイプ",
                         multiselect=False,
                         value="Original",
                         interactive=True,
                     )
                     base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
 
-                    submit_button = gr.Button("
+            submit_button = gr.Button("登録")
             submission_result = gr.Markdown()
             submit_button.click(
                 add_new_eval,
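
For context, the relabeled leaderboard block can be exercised on its own with a minimal sketch like the one below, assuming the gradio_leaderboard package that the leaderboard template imports (Leaderboard, SelectColumns, ColumnFilter) plus gradio and pandas are installed. The toy DataFrame, its column names, and the datatype list are hypothetical stand-ins; the actual app.py derives them from AutoEvalColumn via fields(), as the diff above shows.

# Minimal standalone sketch -- not the Space's app.py. It reuses the Japanese
# labels added in this commit on a hypothetical toy DataFrame instead of the
# AutoEvalColumn-derived columns used by the real leaderboard.
import gradio as gr
import pandas as pd
from gradio_leaderboard import ColumnFilter, Leaderboard, SelectColumns

# Hypothetical stand-in data; the real app builds this from evaluation results.
df = pd.DataFrame(
    {
        "model": ["org/model-a", "org/model-b"],
        "model_type": ["pretrained", "fine-tuned"],
        "precision": ["float16", "bfloat16"],
        "average": [51.2, 48.7],
    }
)

with gr.Blocks() as demo:
    Leaderboard(
        value=df,
        datatype=["markdown", "str", "str", "number"],
        select_columns=SelectColumns(
            default_selection=list(df.columns),
            cant_deselect=["model"],
            label="ディスプレイに表示する行を選択",  # label introduced by this commit
        ),
        search_columns=["model"],
        filter_columns=[
            ColumnFilter("model_type", type="checkboxgroup", label="モデルタイプ"),
            ColumnFilter("precision", type="checkboxgroup", label="Precision"),
        ],
        interactive=False,
    )

if __name__ == "__main__":
    demo.launch()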