SondosMB committed on
Commit
ccd077e
·
verified ·
1 Parent(s): 2961f01

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +38 -10
app.py CHANGED
@@ -183,6 +183,21 @@ def initialize_leaderboard_file():
183
  "Total Questions", "Timestamp", "Team Name"
184
  ]).to_csv(LEADERBOARD_FILE, index=False)
185
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
186
  def clean_answer(answer):
187
  if pd.isna(answer):
188
  return None
@@ -291,22 +306,22 @@ def update_leaderboard_pro(results):
291
  try:
292
  # Update the local leaderboard file
293
  new_entry_df = pd.DataFrame([new_entry])
294
- file_exists = os.path.exists(LEADERBOARD_FILE)
295
 
296
  new_entry_df.to_csv(
297
- LEADERBOARD_FILE,
298
  mode='a', # Append mode
299
  index=False,
300
  header=not file_exists # Write header only if the file is new
301
  )
302
- print(f"Leaderboard updated successfully at {LEADERBOARD_FILE}")
303
 
304
  # Push the updated file to the Hugging Face repository using HTTP API
305
  api = HfApi()
306
  token = HfFolder.get_token()
307
 
308
  api.upload_file(
309
- path_or_fileobj=LEADERBOARD_FILE,
310
  path_in_repo="leaderboardPro.csv",
311
  repo_id="SondosMB/Mobile-MMLU", # Your Space repository
312
  repo_type="space",
@@ -343,6 +358,19 @@ def load_leaderboard():
343
 
344
  })
345
  return pd.read_csv(LEADERBOARD_FILE)
 
 
 
 
 
 
 
 
 
 
 
 
 
346
 
347
  # def evaluate_predictions(prediction_file, model_name, add_to_leaderboard):
348
  # try:
@@ -486,7 +514,7 @@ def evaluate_predictions_pro(prediction_file, model_name,Team_name ,add_to_leade
486
  missing_columns = [col for col in required_columns if col not in predictions_df.columns]
487
  if missing_columns:
488
  return (f"Error: Missing required columns in prediction file: {', '.join(missing_columns)}.",
489
- load_leaderboard())
490
 
491
  # Validate 'Answer' column in ground truth file
492
  if 'Answer' not in ground_truth_df.columns:
@@ -516,7 +544,7 @@ def evaluate_predictions_pro(prediction_file, model_name,Team_name ,add_to_leade
516
 
517
  except Exception as e:
518
  return f"Error during evaluation: {str(e)}", load_leaderboard_pro()
519
- initialize_leaderboard_file()
520
 
521
 
522
  # Function to set default mode
@@ -1056,14 +1084,14 @@ with gr.Blocks(css=css_tech_theme) as demo:
1056
  )
1057
  with gr.TabItem("🏅 Leaderboard-pro"):
1058
  leaderboard_table = gr.Dataframe(
1059
- value=load_leaderboard,
1060
  label="Leaderboard",
1061
  interactive=False,
1062
  wrap=True,
1063
  )
1064
- refresh_button = gr.Button("Refresh Leaderboard")
1065
- refresh_button.click(
1066
- lambda: load_leaderboard,
1067
  inputs=[],
1068
  outputs=[leaderboard_table],
1069
  )
 
183
  "Total Questions", "Timestamp", "Team Name"
184
  ]).to_csv(LEADERBOARD_FILE, index=False)
185
 
186
def initialize_leaderboard_pro_file(path=None):
    """
    Ensure the pro leaderboard CSV exists and has the correct headers.

    Writes an empty CSV containing only the expected column headers when the
    file is missing or zero-length; an already-populated file is left
    untouched.

    Args:
        path: Optional override of the leaderboard file location. Defaults to
            the module-level ``LEADERBOARD_FILE_pro`` (defined elsewhere in
            the file), preserving the original no-argument call sites.
    """
    if path is None:
        path = LEADERBOARD_FILE_pro
    # A missing file and an empty file need the exact same repair, so the
    # original duplicated if/elif branches are collapsed into one condition.
    if not os.path.exists(path) or os.stat(path).st_size == 0:
        pd.DataFrame(columns=[
            "Model Name", "Overall Accuracy", "Correct Predictions",
            "Total Questions", "Timestamp", "Team Name"
        ]).to_csv(path, index=False)
201
  def clean_answer(answer):
202
  if pd.isna(answer):
203
  return None
 
306
  try:
307
  # Update the local leaderboard file
308
  new_entry_df = pd.DataFrame([new_entry])
309
+ file_exists = os.path.exists(LEADERBOARD_FILE_pro)
310
 
311
  new_entry_df.to_csv(
312
+ LEADERBOARD_FILE_pro,
313
  mode='a', # Append mode
314
  index=False,
315
  header=not file_exists # Write header only if the file is new
316
  )
317
+ print(f"Leaderboard updated successfully at {LEADERBOARD_FILE_pro}")
318
 
319
  # Push the updated file to the Hugging Face repository using HTTP API
320
  api = HfApi()
321
  token = HfFolder.get_token()
322
 
323
  api.upload_file(
324
+ path_or_fileobj=LEADERBOARD_FILE_pro,
325
  path_in_repo="leaderboardPro.csv",
326
  repo_id="SondosMB/Mobile-MMLU", # Your Space repository
327
  repo_type="space",
 
358
 
359
  })
360
  return pd.read_csv(LEADERBOARD_FILE)
361
+
362
def load_leaderboard_pro(path=None):
    """
    Load the pro leaderboard as a pandas DataFrame.

    Returns an empty DataFrame with the expected columns when the file is
    missing or zero-length, so callers (e.g. the Gradio leaderboard table)
    always receive a well-formed frame instead of a read error.

    Args:
        path: Optional override of the leaderboard file location. Defaults to
            the module-level ``LEADERBOARD_FILE_pro`` (defined elsewhere in
            the file), preserving the original no-argument call sites.

    Returns:
        pandas.DataFrame: the leaderboard contents, possibly empty.
    """
    if path is None:
        path = LEADERBOARD_FILE_pro
    if not os.path.exists(path) or os.stat(path).st_size == 0:
        return pd.DataFrame({
            "Model Name": [],
            "Overall Accuracy": [],
            "Correct Predictions": [],
            "Total Questions": [],
            "Timestamp": [],
            "Team Name": [],
        })
    return pd.read_csv(path)
374
 
375
  # def evaluate_predictions(prediction_file, model_name, add_to_leaderboard):
376
  # try:
 
514
  missing_columns = [col for col in required_columns if col not in predictions_df.columns]
515
  if missing_columns:
516
  return (f"Error: Missing required columns in prediction file: {', '.join(missing_columns)}.",
517
+ load_leaderboard_pro())
518
 
519
  # Validate 'Answer' column in ground truth file
520
  if 'Answer' not in ground_truth_df.columns:
 
544
 
545
  except Exception as e:
546
  return f"Error during evaluation: {str(e)}", load_leaderboard_pro()
547
+ initialize_leaderboard_pro_file()
548
 
549
 
550
  # Function to set default mode
 
1084
  )
1085
  with gr.TabItem("🏅 Leaderboard-pro"):
1086
  leaderboard_table = gr.Dataframe(
1087
+ value=load_leaderboard_pro(),
1088
  label="Leaderboard",
1089
  interactive=False,
1090
  wrap=True,
1091
  )
1092
+ refresh_button_pro = gr.Button("Refresh Leaderboard-pro")
1093
+ refresh_button_pro.click(
1094
+ lambda: load_leaderboard_pro(),
1095
  inputs=[],
1096
  outputs=[leaderboard_table],
1097
  )