Paul Hager committed
Commit 5f8b961 · 1 Parent(s): 5a6d6fb
Files changed (3):
  1. app.py +34 -20
  2. src/display/utils.py +4 -4
  3. src/populate.py +39 -6
app.py CHANGED
@@ -1,5 +1,5 @@
 import gradio as gr
-from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
+from gradio_leaderboard import Leaderboard, SelectColumns
 import pandas as pd
 from apscheduler.schedulers.background import BackgroundScheduler
 from huggingface_hub import snapshot_download
@@ -73,33 +73,44 @@ LEADERBOARD_DF_CDM_FI = get_leaderboard_df(EVAL_RESULTS_PATH_CDM_FI, COLS, BENCH
 
 def init_leaderboard(dataframe):
     if dataframe is None or dataframe.empty:
-        raise ValueError("Leaderboard DataFrame is empty or None.")
-    return Leaderboard(
-        value=dataframe,
-        datatype=[c.type for c in fields(AutoEvalColumn)],
-        select_columns=SelectColumns(
-            default_selection=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default],
-            cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden],
-            label="Select Columns to Display:",
-        ),
-        search_columns=[AutoEvalColumn.model.name],
-        interactive=False,
-    )
-
-
+        print("Warning: Empty dataframe provided to leaderboard")
+        # Return a simple empty dataframe instead of trying to create a custom one
+        return gr.Dataframe(value=pd.DataFrame())
+
+    print(f"Initializing leaderboard with {len(dataframe)} rows")
+    print(f"Columns: {dataframe.columns.tolist()}")
+
+    # Convert dataframe to ensure proper types
+    for col in dataframe.columns:
+        if col in ["average", "params"] + [t.value.col_name for t in Tasks]:
+            dataframe[col] = pd.to_numeric(dataframe[col], errors="coerce")
+        elif col == "still_on_hub":
+            dataframe[col] = dataframe[col].astype(bool)
+        else:
+            dataframe[col] = dataframe[col].astype(str)
+
+    try:
+        return gr.Dataframe(value=dataframe, interactive=False, wrap=True)
+    except Exception as e:
+        print(f"Error initializing leaderboard: {e}")
+        return gr.Dataframe(value=pd.DataFrame())
+
+
+# Initialize the app
 demo = gr.Blocks(css=custom_css)
+
 with demo:
     gr.HTML(TITLE)
     gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
 
-    with gr.Tabs(elem_classes="tab-buttons") as tabs:
-        with gr.TabItem("MIMIC CDM", elem_id="llm-benchmark-tab-table", id=0):
+    with gr.Tabs() as tabs:
+        with gr.Tab("MIMIC CDM"):
             leaderboard_cdm = init_leaderboard(LEADERBOARD_DF_CDM)
 
-        with gr.TabItem("MIMIC CDM FI", elem_id="llm-benchmark-tab-table", id=1):
+        with gr.Tab("MIMIC CDM FI"):
            leaderboard_cdm_fi = init_leaderboard(LEADERBOARD_DF_CDM_FI)
 
-        with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
+        with gr.Tab("📝 About"):
             gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
 
     with gr.Row():
@@ -112,7 +123,10 @@ with demo:
                 show_copy_button=True,
             )
 
+# Start the scheduler
 scheduler = BackgroundScheduler()
 scheduler.add_job(restart_space, "interval", seconds=1800)
 scheduler.start()
-demo.queue(default_concurrency_limit=40).launch(share=True)
+
+# Launch the app
+demo.queue(default_concurrency_limit=40).launch()
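The rewritten `init_leaderboard` now renders a plain `gr.Dataframe` instead of the `gradio_leaderboard.Leaderboard` component and coerces column dtypes before display. A minimal, self-contained sketch of that coercion on a toy frame (the values and the reduced `numeric_cols` list below are invented for illustration; the real app also coerces `params` and the per-task columns from `Tasks`):

```python
# Illustrative sketch only -- mirrors the dtype coercion in init_leaderboard above
# on a hypothetical toy frame; the column values here are invented, not real results.
import pandas as pd

toy = pd.DataFrame(
    {
        "model": ["org/model-a", "org/model-b"],
        "average": ["71.3", "n/a"],   # strings on purpose, as raw JSON results might be
        "still_on_hub": [1, 0],
    }
)

numeric_cols = ["average"]            # the app also includes "params" and the task columns
for col in toy.columns:
    if col in numeric_cols:
        toy[col] = pd.to_numeric(toy[col], errors="coerce")  # "n/a" becomes NaN, not an error
    elif col == "still_on_hub":
        toy[col] = toy[col].astype(bool)
    else:
        toy[col] = toy[col].astype(str)

print(toy.dtypes)  # average -> float64, still_on_hub -> bool, model -> object
```

The trade-off versus the old code is that the column-selection and search features of `Leaderboard` are lost, but an empty or oddly typed frame no longer raises at startup.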
src/display/utils.py CHANGED
@@ -26,18 +26,18 @@ class ColumnContent:
 auto_eval_column_dict = []
 # Init
 # auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)])
-auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
+auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "str", True, never_hidden=True)])
 # Scores
-auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)])
+auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "float", True)])
 for task in Tasks:
-    auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)])
+    auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "float", True)])
 # Model information
 # auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)])
 auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)])
 # auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, True)])
 # auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False)])
 # auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False)])
-auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)])
+auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "float", False)])
 auto_eval_column_dict.append(["seq_length", ColumnContent, ColumnContent("Max Sequence Length", "number", False)])
 auto_eval_column_dict.append(
     ["model_quantization_bits", ColumnContent, ColumnContent("Quantization Bits", "number", False)]
src/populate.py CHANGED
@@ -11,15 +11,48 @@ from src.leaderboard.read_evals import get_raw_eval_results
 def get_leaderboard_df(results_path: str, cols: list, benchmark_cols: list) -> pd.DataFrame:
     """Creates a dataframe from all the individual experiment results"""
     raw_data = get_raw_eval_results(results_path)
+    if not raw_data:
+        print(f"Warning: No results found in {results_path}")
+        return pd.DataFrame(columns=cols)
+
     all_data_json = [v.to_dict() for v in raw_data]
 
-    df = pd.DataFrame.from_records(all_data_json)
-    df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
-    df = df[cols].round(decimals=2)
+    try:
+        df = pd.DataFrame.from_records(all_data_json)
+
+        # Ensure all required columns exist with proper types
+        for col in cols:
+            if col not in df.columns:
+                df[col] = None
+
+        # Convert numeric columns
+        numeric_cols = ["average", "params"] + [t.value.col_name for t in Tasks]
+        for col in numeric_cols:
+            if col in df.columns:
+                df[col] = pd.to_numeric(df[col], errors="coerce")
+
+        # Convert boolean columns
+        if "still_on_hub" in df.columns:
+            df["still_on_hub"] = df["still_on_hub"].astype(bool)
+
+        # Convert string columns
+        string_cols = ["model", "architecture"]
+        for col in string_cols:
+            if col in df.columns:
+                df[col] = df[col].astype(str)
+
+        df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
+        df = df[cols].round(decimals=2)
+
+        # filter out if any of the benchmarks have not been produced
+        df = df[has_no_nan_values(df, benchmark_cols)]
 
-    # filter out if any of the benchmarks have not been produced
-    df = df[has_no_nan_values(df, benchmark_cols)]
-    return df
+        print(f"Loaded {len(df)} results from {results_path}")
+        print(f"Columns: {df.columns.tolist()}")
+        return df
+    except Exception as e:
+        print(f"Error creating dataframe: {e}")
+        return pd.DataFrame(columns=cols)
 
 
 def get_evaluation_queue_df(save_path: str, cols: list) -> list[pd.DataFrame]:
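Downstream, the happy path of `get_leaderboard_df` is unchanged: sort by average, round, and drop rows with missing benchmark scores. A self-contained sketch of that core transform on invented records; `has_no_nan_values` is re-implemented here the way the leaderboard template usually defines it (an assumption), and the benchmark column names are hypothetical:

```python
# Sketch of the sort / round / NaN-filter chain on invented records.
import pandas as pd

def has_no_nan_values(df: pd.DataFrame, columns: list) -> pd.Series:
    # Assumed behaviour of the template helper: True where every benchmark column is filled.
    return df[columns].notna().all(axis=1)

benchmark_cols = ["MIMIC CDM", "MIMIC CDM FI"]   # hypothetical column names
records = [
    {"model": "org/model-a", "average": 71.256, "MIMIC CDM": 70.1, "MIMIC CDM FI": 72.4},
    {"model": "org/model-b", "average": 65.0, "MIMIC CDM": None, "MIMIC CDM FI": 66.0},
]

df = pd.DataFrame.from_records(records)
df = df.sort_values(by=["average"], ascending=False).round(decimals=2)
df = df[has_no_nan_values(df, benchmark_cols)]   # model-b is dropped: one benchmark is missing
print(df)
```

With the new guards, an empty results directory now yields an empty frame with the expected columns instead of a crash, and any type problems are coerced or surfaced as a logged exception rather than taking the Space down.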