pufanyi committed
Commit 903180b
1 Parent(s): 15d3941

chore: Remove commented out code for model information in utils.py

Files changed (2)
  1. src/display/utils.py +0 -10
  2. src/populate.py +1 -0
src/display/utils.py CHANGED
@@ -29,16 +29,6 @@ auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model Name"
 auto_eval_column_dict.append(["Overall", ColumnContent, ColumnContent("Total", "number", True)])
 for task in Tasks:
     auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)])
-# Model information
-# auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)])
-# auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)])
-# auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, True)])
-# auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False)])
-# auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False)])
-# auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)])
-# auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
-# auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
-# auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])

 # We use make dataclass to dynamically fill the scores from Tasks
 AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
 
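For context, a minimal sketch of the pattern this hunk keeps: make_dataclass builds the AutoEvalColumn dataclass from the [name, type, default] entries collected in auto_eval_column_dict. The ColumnContent definition and the example entries below are assumptions based on the common leaderboard template, not part of this commit.

from dataclasses import dataclass, make_dataclass

# Assumed shape of ColumnContent (frozen so instances are hashable defaults).
@dataclass(frozen=True)
class ColumnContent:
    name: str
    type: str
    displayed_by_default: bool
    hidden: bool = False

# Each entry is [field_name, field_type, default ColumnContent instance];
# make_dataclass uses the third element as the field's default value.
auto_eval_column_dict = [
    ["model", ColumnContent, ColumnContent("Model Name", "markdown", True)],
    ["Overall", ColumnContent, ColumnContent("Total", "number", True)],
]

AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)

print(AutoEvalColumn.Overall.name)  # -> "Total"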
src/populate.py CHANGED
@@ -27,6 +27,7 @@ from datasets import load_dataset
 def get_leaderboard_df(results_repo, results_path, dataset_version):
     hf_leaderboard = load_dataset(results_repo, dataset_version, split="test", cache_dir=results_path)
     df = hf_leaderboard.to_pandas()
+    df = df.sort_values(by="Total", ascending=False)
     print(df)
     return df
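The one added line sorts the leaderboard by the overall score before it is displayed. A small sketch of the effect with made-up rows (the model names and scores are illustrative only):

import pandas as pd

# Hypothetical leaderboard rows; "Total" is the overall score column.
df = pd.DataFrame(
    {"Model Name": ["model-a", "model-b", "model-c"], "Total": [42.0, 55.5, 48.1]}
)

# Same call as the added line: highest overall score first.
df = df.sort_values(by="Total", ascending=False)
print(df["Model Name"].tolist())  # -> ['model-b', 'model-c', 'model-a']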