Update app.py
app.py CHANGED
@@ -168,7 +168,7 @@ if not HF_TOKEN:
 # "Correct Predictions", "Total Questions", "Timestamp"
 # ]).to_csv(LEADERBOARD_FILE, index=False)

-def initialize_leaderboard_file(LEADERBOARD_FILE):
+def initialize_leaderboard_file():
     """
     Ensure the leaderboard file exists and has the correct headers.
     """
@@ -183,6 +183,21 @@ def initialize_leaderboard_file(LEADERBOARD_FILE):
         "Total Questions", "Timestamp", "Team Name"
     ]).to_csv(LEADERBOARD_FILE, index=False)

+def initialize_leaderboardpro_file():
+    """
+    Ensure the leaderboard file exists and has the correct headers.
+    """
+    if not os.path.exists(LEADERBOARD_FILE_pro):
+        pd.DataFrame(columns=[
+            "Model Name", "Overall Accuracy", "Correct Predictions",
+            "Total Questions", "Timestamp", "Team Name"
+        ]).to_csv(LEADERBOARD_FILE_pro, index=False)
+    elif os.stat(LEADERBOARD_FILE_pro).st_size == 0:
+        pd.DataFrame(columns=[
+            "Model Name", "Overall Accuracy", "Correct Predictions",
+            "Total Questions", "Timestamp", "Team Name"
+        ]).to_csv(LEADERBOARD_FILE_pro, index=False)
+
 def clean_answer(answer):
     if pd.isna(answer):
         return None
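Both initializers follow the same pattern: if the CSV is missing, or exists but is zero bytes, write a header-only frame so later reads get the right columns. Since the if and elif branches emit identical content, the check collapses to a single or, and the plain and pro variants could share one helper. A minimal consolidation sketch, assuming pandas and the module-level path constants from app.py; the helper name and the example paths are hypothetical:

import os
import pandas as pd

# Stand-ins for the module-level constants in app.py (assumed values).
LEADERBOARD_FILE = "leaderboard.csv"
LEADERBOARD_FILE_pro = "leaderboard_pro.csv"

LEADERBOARD_COLUMNS = [
    "Model Name", "Overall Accuracy", "Correct Predictions",
    "Total Questions", "Timestamp", "Team Name",
]

def _ensure_leaderboard_csv(path):
    # Missing and zero-byte files get the same fix: write headers only.
    if not os.path.exists(path) or os.stat(path).st_size == 0:
        pd.DataFrame(columns=LEADERBOARD_COLUMNS).to_csv(path, index=False)

def initialize_leaderboard_file():
    _ensure_leaderboard_csv(LEADERBOARD_FILE)

def initialize_leaderboardpro_file():
    _ensure_leaderboard_csv(LEADERBOARD_FILE_pro)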
@@ -470,7 +485,7 @@ def evaluate_predictions(prediction_file, model_name,Team_name ,add_to_leaderboa

     except Exception as e:
         return f"Error during evaluation: {str(e)}", load_leaderboard()
-initialize_leaderboard_file(LEADERBOARD_FILE)
+initialize_leaderboard_file()


@@ -529,7 +544,7 @@ def evaluate_predictions_pro(prediction_file, model_name,Team_name ,add_to_leade

     except Exception as e:
         return f"Error during evaluation: {str(e)}", load_leaderboard_pro()
-
+initialize_leaderboardpro_file()


 # Function to set default mode
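Each initialize call sits at module level, right after the definitions it depends on, so the CSV exists before Gradio handles its first request; on a fresh deployment, the first read of the leaderboard would otherwise fail. A rough illustration, reusing the names from the sketch above (load_leaderboard's real body is not part of this diff, so the reader shown here is an assumption):

# Continuing the previous sketch: pd.read_csv raises FileNotFoundError
# unless the initializer has already created the file.
def load_leaderboard():
    return pd.read_csv(LEADERBOARD_FILE)

initialize_leaderboard_file()  # runs at import time in app.py
print(load_leaderboard())      # empty frame with the six header columns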
@@ -1074,8 +1089,8 @@ with gr.Blocks(css=css_tech_theme) as demo:
                 interactive=False,
                 wrap=True,
             )
-
-
+            refresh_button_pro = gr.Button("Refresh Leaderboard-pro")
+            refresh_button_pro.click(
                 lambda: load_leaderboard_pro(),
                 inputs=[],
                 outputs=[leaderboard_table],
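The last hunk adds a dedicated refresh control for the pro leaderboard: a Button whose click handler takes no inputs and pushes a freshly loaded DataFrame into the existing table component. A self-contained sketch of that wiring, assuming a pandas-backed load_leaderboard_pro and omitting app.py's css_tech_theme and surrounding layout:

import gradio as gr
import pandas as pd

def load_leaderboard_pro():
    # Stand-in for app.py's loader: re-read the pro CSV on every refresh.
    return pd.read_csv("leaderboard_pro.csv")  # assumed path

with gr.Blocks() as demo:
    leaderboard_table = gr.Dataframe(
        value=load_leaderboard_pro(),
        interactive=False,
        wrap=True,
    )
    refresh_button_pro = gr.Button("Refresh Leaderboard-pro")
    # With inputs=[], Gradio calls the lambda with no arguments and
    # assigns its return value to leaderboard_table.
    refresh_button_pro.click(
        lambda: load_leaderboard_pro(),
        inputs=[],
        outputs=[leaderboard_table],
    )

demo.launch()

Note that the click handler targets leaderboard_table, which the non-pro view presumably renders into as well; if the two leaderboards are meant to refresh independently, the pro table would likely need its own Dataframe output.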