hSterz committed
Commit b2ed338 · 1 Parent(s): 1be492d
Files changed (2):
  1. src/leaderboard/read_evals.py +2 -2
  2. src/populate.py +2 -2
src/leaderboard/read_evals.py CHANGED

```diff
@@ -9,7 +9,7 @@ import numpy as np
 
 from src.display.formatting import make_clickable_model
 from src.display.utils import AutoEvalColumn, ModelType, Precision, WeightType
-from src.about import Tasks, N_Tasks
+from src.about import Detail_Tasks, Tasks, N_Tasks
 from src.submission.check_validity import is_model_on_hub
 
 
@@ -177,7 +177,7 @@ def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
 
     version = results_path.split("/")[-1]
     print(version)
-    tasks = N_Tasks if "n_" in version else Tasks
+    tasks = list(N_Tasks) + list(Detail_Tasks) if "n_" in version else Tasks
     print(tasks)
 
     eval_results = {}
```
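For context, `src/about.py` is not part of this commit. In the stock leaderboard template that this repo follows, `Tasks` and its siblings are `Enum` classes whose members wrap a `Task` dataclass, and `Detail_Tasks` presumably follows the same pattern. Under that assumption, the sketch below (placeholder benchmark names, not real ones) shows why the new expression works: iterating an `Enum` class yields its members, so `list(N_Tasks) + list(Detail_Tasks)` produces one flat list that downstream code can loop over just like the `Tasks` enum itself.

```python
# Sketch of the assumed shapes in src/about.py (not shown in this commit);
# the benchmark names are placeholders, only the Enum mechanics matter.
from dataclasses import dataclass
from enum import Enum


@dataclass
class Task:
    benchmark: str  # key under "results" in the eval JSON
    metric: str     # metric to read for that benchmark
    col_name: str   # column header shown on the leaderboard


class Tasks(Enum):
    task0 = Task("benchmark_a", "acc", "Benchmark A")


class N_Tasks(Enum):
    task0 = Task("n_benchmark_a", "acc", "N Benchmark A")


class Detail_Tasks(Enum):
    task0 = Task("detail_a", "acc", "Detail A")


version = "n_1_correct"  # illustrative folder name containing "n_"

# Iterating an Enum class yields its members, so the two task sets
# concatenate into one plain list; the "else" branch keeps the Enum
# class, which is equally iterable, so callers handle both the same way.
tasks = list(N_Tasks) + list(Detail_Tasks) if "n_" in version else Tasks
print([t.value.col_name for t in tasks])  # ['N Benchmark A', 'Detail A']
```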
src/populate.py CHANGED

```diff
@@ -6,13 +6,13 @@ import pandas as pd
 from src.display.formatting import has_no_nan_values, make_clickable_model
 from src.display.utils import AutoEvalColumn, EvalQueueColumn
 from src.leaderboard.read_evals import get_raw_eval_results
-from src.about import Tasks, N_Tasks
+from src.about import Tasks, N_Tasks, Detail_Tasks
 
 
 def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list, version="1_correct") -> pd.DataFrame:
     """Creates a dataframe from all the individual experiment results"""
     raw_data = get_raw_eval_results(results_path+"/"+version, requests_path)
-    tasks = Tasks if version == "1_correct" else N_Tasks
+    tasks = Tasks if version == "1_correct" else list(N_Tasks) + list(Detail_Tasks)
     all_data_json = [v.to_dict(tasks) for v in raw_data]
 
     print(all_data_json)
```
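One thing to watch: the two files now pick the combined task set with different predicates (`"n_" in version` in read_evals.py, `version == "1_correct"` in populate.py). These agree for the current version tags, but a future tag such as one that is neither `"1_correct"` nor contains `"n_"` would be routed differently by the two files. A small helper along these lines (hypothetical, not part of this commit; it assumes the `Tasks`/`N_Tasks`/`Detail_Tasks` enums sketched above) would keep the mapping in one place:

```python
# Hypothetical helper for src/about.py (not in this commit): a single
# version -> task-set mapping that both read_evals.py and populate.py
# could import, so the two selection rules cannot drift apart.
def tasks_for_version(version: str) -> list:
    """Return the task set for a results-folder version tag."""
    if "n_" in version:
        return list(N_Tasks) + list(Detail_Tasks)
    return list(Tasks)
```

Both call sites would then reduce to `tasks = tasks_for_version(version)`.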