mirageco commited on
Commit
da9cc09
1 Parent(s): 7723d4d

Remove unused debug print. Add additional check for request files

Browse files
Files changed (2) hide show
  1. app.py +0 -10
  2. src/leaderboard/read_evals.py +8 -1
app.py CHANGED
@@ -134,16 +134,12 @@ def filter_queries(query: str, filtered_df: pd.DataFrame) -> pd.DataFrame:
134
  def filter_models(
135
  df: pd.DataFrame, type_query: list, size_query: list, precision_query: list, show_deleted: bool
136
  ) -> pd.DataFrame:
137
- print("Initial number of models:", len(df))
138
-
139
  # Show all models
140
  if show_deleted:
141
  filtered_df = df
142
  else:
143
  filtered_df = df[df[AutoEvalColumn.still_on_hub.name] == True]
144
 
145
- print("After hub filter:", len(filtered_df))
146
-
147
  if "All" not in type_query:
148
  if "?" in type_query:
149
  filtered_df = filtered_df.loc[~df[AutoEvalColumn.model_type_symbol.name].isin([t for t in ModelType if t != "?"])]
@@ -151,16 +147,12 @@ def filter_models(
151
  type_emoji = [t[0] for t in type_query]
152
  filtered_df = filtered_df.loc[df[AutoEvalColumn.model_type_symbol.name].isin(type_emoji)]
153
 
154
- print("After type filter:", len(filtered_df))
155
-
156
  if "All" not in precision_query:
157
  if "?" in precision_query:
158
  filtered_df = filtered_df.loc[df[AutoEvalColumn.precision.name].isna()]
159
  else:
160
  filtered_df = filtered_df.loc[df[AutoEvalColumn.precision.name].isin(precision_query + ["None"])]
161
 
162
- print("After precision filter:", len(filtered_df))
163
-
164
  if "All" not in size_query:
165
  if "?" in size_query:
166
  filtered_df = filtered_df.loc[df[AutoEvalColumn.params.name].isna()]
@@ -170,8 +162,6 @@ def filter_models(
170
  mask = params_column.apply(lambda x: any(numeric_interval.contains(x)))
171
  filtered_df = filtered_df.loc[mask]
172
 
173
- print("After size filter:", len(filtered_df))
174
-
175
  return filtered_df
176
 
177
 
 
134
  def filter_models(
135
  df: pd.DataFrame, type_query: list, size_query: list, precision_query: list, show_deleted: bool
136
  ) -> pd.DataFrame:
 
 
137
  # Show all models
138
  if show_deleted:
139
  filtered_df = df
140
  else:
141
  filtered_df = df[df[AutoEvalColumn.still_on_hub.name] == True]
142
 
 
 
143
  if "All" not in type_query:
144
  if "?" in type_query:
145
  filtered_df = filtered_df.loc[~df[AutoEvalColumn.model_type_symbol.name].isin([t for t in ModelType if t != "?"])]
 
147
  type_emoji = [t[0] for t in type_query]
148
  filtered_df = filtered_df.loc[df[AutoEvalColumn.model_type_symbol.name].isin(type_emoji)]
149
 
 
 
150
  if "All" not in precision_query:
151
  if "?" in precision_query:
152
  filtered_df = filtered_df.loc[df[AutoEvalColumn.precision.name].isna()]
153
  else:
154
  filtered_df = filtered_df.loc[df[AutoEvalColumn.precision.name].isin(precision_query + ["None"])]
155
 
 
 
156
  if "All" not in size_query:
157
  if "?" in size_query:
158
  filtered_df = filtered_df.loc[df[AutoEvalColumn.params.name].isna()]
 
162
  mask = params_column.apply(lambda x: any(numeric_interval.contains(x)))
163
  filtered_df = filtered_df.loc[mask]
164
 
 
 
165
  return filtered_df
166
 
167
 
src/leaderboard/read_evals.py CHANGED
@@ -38,8 +38,9 @@ class EvalResult:
38
  with open(json_filepath) as fp:
39
  data = json.load(fp)
40
 
41
- config = data.get("config")
42
 
 
43
  # Precision
44
  precision = Precision.from_str(config.get("model_dtype"))
45
 
@@ -82,6 +83,8 @@ class EvalResult:
82
  mean_acc = np.mean(accs) * 100.0
83
  results[task.benchmark] = mean_acc
84
 
 
 
85
  return self(
86
  eval_name=result_key,
87
  full_model=full_model,
@@ -95,6 +98,7 @@ class EvalResult:
95
  model_type=model_type
96
  )
97
 
 
98
  def update_with_request_file(self, requests_path):
99
  """Finds the relevant request file for the current model and updates info with it"""
100
  request_file = get_request_file_for_model(requests_path, self.full_model, self.precision.value.name)
@@ -176,6 +180,8 @@ def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResu
176
  for file in files:
177
  model_result_filepaths.append(os.path.join(root, file))
178
 
 
 
179
  eval_results = {}
180
  for model_result_filepath in model_result_filepaths:
181
  # Creation of result
@@ -197,4 +203,5 @@ def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResu
197
  except KeyError: # not all eval values present
198
  continue
199
 
 
200
  return results
 
38
  with open(json_filepath) as fp:
39
  data = json.load(fp)
40
 
41
+ print(f"Processing file: {json_filepath}")
42
 
43
+ config = data.get("config")
44
  # Precision
45
  precision = Precision.from_str(config.get("model_dtype"))
46
 
 
83
  mean_acc = np.mean(accs) * 100.0
84
  results[task.benchmark] = mean_acc
85
 
86
+ print(f"Model: {model}, Org: {org}, Results: {results.keys()}")
87
+
88
  return self(
89
  eval_name=result_key,
90
  full_model=full_model,
 
98
  model_type=model_type
99
  )
100
 
101
+
102
  def update_with_request_file(self, requests_path):
103
  """Finds the relevant request file for the current model and updates info with it"""
104
  request_file = get_request_file_for_model(requests_path, self.full_model, self.precision.value.name)
 
180
  for file in files:
181
  model_result_filepaths.append(os.path.join(root, file))
182
 
183
+ print(f"Found {len(model_result_filepaths)} JSON files to process.")
184
+
185
  eval_results = {}
186
  for model_result_filepath in model_result_filepaths:
187
  # Creation of result
 
203
  except KeyError: # not all eval values present
204
  continue
205
 
206
+ print(f"Successfully loaded {len(results)} models.")
207
  return results