Sakalti committed
Commit 0b4e494 · verified · 1 Parent(s): 16787eb

Update app.py

Files changed (1):
  1. app.py +13 -13
app.py CHANGED
@@ -66,12 +66,12 @@ def init_leaderboard(dataframe):
         select_columns=SelectColumns(
             default_selection=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default],
             cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden],
-            label="Select Columns to Display:",
+            label="ディスプレイに表示する行を選択。",
         ),
         search_columns=[AutoEvalColumn.model.name, AutoEvalColumn.license.name],
         hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
         filter_columns=[
-            ColumnFilter(AutoEvalColumn.model_type.name, type="checkboxgroup", label="Model types"),
+            ColumnFilter(AutoEvalColumn.model_type.name, type="checkboxgroup", label="モデルタイプ"),
             ColumnFilter(AutoEvalColumn.precision.name, type="checkboxgroup", label="Precision"),
             ColumnFilter(
                 AutoEvalColumn.params.name,
@@ -84,7 +84,7 @@ def init_leaderboard(dataframe):
                 AutoEvalColumn.still_on_hub.name, type="boolean", label="Deleted/incomplete", default=True
             ),
         ],
-        bool_checkboxgroup_label="Hide models",
+        bool_checkboxgroup_label="モデルを隠す",
         interactive=False,
     )
 
@@ -95,20 +95,20 @@ with demo:
     gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
 
     with gr.Tabs(elem_classes="tab-buttons") as tabs:
-        with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
+        with gr.TabItem("🏅 LLM ベンチマーク", elem_id="llm-benchmark-tab-table", id=0):
             leaderboard = init_leaderboard(LEADERBOARD_DF)
 
         with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
             gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
 
-        with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
+        with gr.TabItem("🚀 追加! ", elem_id="llm-benchmark-tab-table", id=3):
             with gr.Column():
                 with gr.Row():
                     gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
 
                 with gr.Column():
                     with gr.Accordion(
-                        f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
+                        f"✅ 評価が完了したモデル ({len(finished_eval_queue_df)})",
                         open=False,
                     ):
                         with gr.Row():
@@ -119,7 +119,7 @@ with demo:
                             row_count=5,
                         )
                     with gr.Accordion(
-                        f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
+                        f"🔄 評価実行中のモデル ({len(running_eval_queue_df)})",
                         open=False,
                     ):
                         with gr.Row():
@@ -142,15 +142,15 @@ with demo:
                             row_count=5,
                         )
             with gr.Row():
-                gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
+                gr.Markdown("# ✉️✨ ぜひモデルを追加ください！", elem_classes="markdown-text")
 
             with gr.Row():
                 with gr.Column():
-                    model_name_textbox = gr.Textbox(label="Model name")
-                    revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
+                    model_name_textbox = gr.Textbox(label="モデルネーム")
+                    revision_name_textbox = gr.Textbox(label="コミット", placeholder="main")
                     model_type = gr.Dropdown(
                         choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
-                        label="Model type",
+                        label="モデルタイプ",
                         multiselect=False,
                         value=None,
                         interactive=True,
@@ -166,14 +166,14 @@ with demo:
                     )
                     weight_type = gr.Dropdown(
                         choices=[i.value.name for i in WeightType],
-                        label="Weights type",
+                        label="ウェイトタイプ",
                         multiselect=False,
                         value="Original",
                         interactive=True,
                     )
                     base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
 
-                    submit_button = gr.Button("Submit Eval")
+                    submit_button = gr.Button("登録")
                     submission_result = gr.Markdown()
                     submit_button.click(
                         add_new_eval,
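
The commit only swaps label strings on existing components, so it can be sanity-checked against a stripped-down Space. Below is a minimal, runnable sketch of the same Leaderboard wiring, assuming gradio, pandas, and gradio_leaderboard are installed; the DataFrame and its column names are hypothetical stand-ins for the Space's AutoEvalColumn fields, and only keyword arguments that appear in the diff are used.

# Minimal sketch: the commit's localized labels plugged into gradio_leaderboard.
# Assumption: `df`, its columns, and the two rows of data are hypothetical;
# every keyword argument below is taken from the diff itself.
import gradio as gr
import pandas as pd
from gradio_leaderboard import ColumnFilter, Leaderboard, SelectColumns

df = pd.DataFrame(
    {
        "model": ["my-org/model-a", "my-org/model-b"],  # searchable column
        "model_type": ["pretrained", "fine-tuned"],     # filterable column
        "score": [61.2, 58.7],
    }
)

with gr.Blocks() as demo:
    Leaderboard(
        value=df,
        select_columns=SelectColumns(
            default_selection=["model", "score"],
            cant_deselect=["model"],
            label="ディスプレイに表示する行を選択。",  # label from the commit
        ),
        search_columns=["model"],
        filter_columns=[
            ColumnFilter("model_type", type="checkboxgroup", label="モデルタイプ"),
        ],
        interactive=False,
    )

demo.launch()

Because the labels are plain strings passed straight to the components, localizing the UI this way requires no other wiring changes.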