pufanyi committed on
Commit
944c822
β€’
1 Parent(s): cb5cde2

Refactor init_leaderboard function to improve dropdown UI and add search functionality

Browse files
Files changed (1) hide show
  1. app.py +25 -17
app.py CHANGED
@@ -70,23 +70,31 @@ LEADERBOARD_DF, SUBSETS = get_leaderboard_df(RESULTS_REPO)
70
 
71
  def init_leaderboard(dataframes, subsets):
72
  subsets = list(subsets)
73
- selected_subset = gr.Dropdown(choices=subsets, label="Select Dataset Subset", value=subsets[-1])
 
 
 
 
 
 
 
74
 
75
- return Leaderboard(
76
- value=dataframes,
77
- datatype=[c.type for c in fields(AutoEvalColumn)],
78
- select_columns=SelectColumns(
79
- default_selection=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default],
80
- cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden],
81
- label="Select Columns to Display:",
82
- ),
83
- search_columns=[AutoEvalColumn.model.name],
84
- hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
85
- filter_columns=[
86
- "Dataset Version",
87
- ],
88
- interactive=False,
89
- )
 
90
 
91
  demo = gr.Blocks(css=custom_css)
92
  with demo:
@@ -95,7 +103,7 @@ with demo:
95
 
96
  with gr.Tabs(elem_classes="tab-buttons") as tabs:
97
  with gr.TabItem("πŸ… LiveBench Results", elem_id="llm-benchmark-tab-table", id=0):
98
- leaderboard = init_leaderboard(LEADERBOARD_DF, SUBSETS)
99
 
100
  with gr.TabItem("πŸ“ About", elem_id="llm-benchmark-tab-table", id=2):
101
  gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
 
70
 
71
  def init_leaderboard(dataframes, subsets):
72
  subsets = list(subsets)
73
+
74
+ with gr.Row():
75
+ selected_subset = gr.Dropdown(choices=subsets, label="Select Dataset Subset", value=subsets[-1])
76
+ research_textbox = gr.Textbox(placeholder="πŸ” Search Models... [press enter]", label="Filter Models by Name", )
77
+
78
+ with gr.Row():
79
+ datatype = [c.type for c in fields(AutoEvalColumn)]
80
+ dataframe = gr.Dataframe(dataframes, datatype=datatype, type="pandas")
81
 
82
+
83
+ # return Leaderboard(
84
+ # value=dataframes,
85
+ # datatype=[c.type for c in fields(AutoEvalColumn)],
86
+ # select_columns=SelectColumns(
87
+ # default_selection=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default],
88
+ # cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden],
89
+ # label="Select Columns to Display:",
90
+ # ),
91
+ # search_columns=[AutoEvalColumn.model.name],
92
+ # hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
93
+ # filter_columns=[
94
+ # "Dataset Version",
95
+ # ],
96
+ # interactive=False,
97
+ # )
98
 
99
  demo = gr.Blocks(css=custom_css)
100
  with demo:
 
103
 
104
  with gr.Tabs(elem_classes="tab-buttons") as tabs:
105
  with gr.TabItem("πŸ… LiveBench Results", elem_id="llm-benchmark-tab-table", id=0):
106
+ init_leaderboard(LEADERBOARD_DF, SUBSETS)
107
 
108
  with gr.TabItem("πŸ“ About", elem_id="llm-benchmark-tab-table", id=2):
109
  gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")