Update app.py

app.py CHANGED
@@ -4,6 +4,8 @@ import os
 import re
 from datetime import datetime
 
+LEADERBOARD_FILE = "leaderboard.csv"  # File to store leaderboard data
+
 def clean_answer(answer):
     if pd.isna(answer):
         return None
@@ -48,6 +50,28 @@ def write_evaluation_results(results, output_file):
     print('\n'.join(output_text))
     print(f"\nResults have been saved to: {output_file}")
 
+def update_leaderboard(results):
+    # Add results to the leaderboard file
+    new_entry = {
+        "Model Name": results['model_name'],
+        "Overall Accuracy": f"{results['overall_accuracy']:.2%}",
+        "Valid Accuracy": f"{results['valid_accuracy']:.2%}",
+        "Correct Predictions": results['correct_predictions'],
+        "Total Questions": results['total_questions'],
+        "Timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+    }
+    leaderboard_df = pd.DataFrame([new_entry])
+    if os.path.exists(LEADERBOARD_FILE):
+        existing_df = pd.read_csv(LEADERBOARD_FILE)
+        leaderboard_df = pd.concat([existing_df, leaderboard_df], ignore_index=True)
+    leaderboard_df.to_csv(LEADERBOARD_FILE, index=False)
+
+def display_leaderboard():
+    if not os.path.exists(LEADERBOARD_FILE):
+        return "Leaderboard is empty."
+    leaderboard_df = pd.read_csv(LEADERBOARD_FILE)
+    return leaderboard_df.to_markdown(index=False)
+
 def evaluate_predictions(prediction_file):
     ground_truth_file = "ground_truth.csv"  # Specify the path to the ground truth file
     if not prediction_file:
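For anyone reviewing the new helpers in isolation, here is a minimal round-trip sketch. The `sample_results` dict is hypothetical: the diff only shows that whatever `evaluate_predictions` builds must carry the `model_name`, `overall_accuracy`, `valid_accuracy`, `correct_predictions`, and `total_questions` keys that `update_leaderboard` reads. Note also that `DataFrame.to_markdown` requires the `tabulate` package at runtime.

```python
# Sketch only: exercise update_leaderboard/display_leaderboard outside Gradio.
# Assumes this script sits next to app.py and that pandas + tabulate are
# installed; all values below are made up for illustration.
from app import update_leaderboard, display_leaderboard

sample_results = {
    'model_name': 'demo-model',      # hypothetical model identifier
    'overall_accuracy': 0.8125,      # fraction; the helper formats it as a %
    'valid_accuracy': 0.8421,
    'correct_predictions': 130,
    'total_questions': 160,
}

update_leaderboard(sample_results)   # appends one row to leaderboard.csv
print(display_leaderboard())         # prints the table as a markdown string
```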
@@ -120,28 +144,33 @@ def evaluate_predictions(prediction_file):
             'field_performance': field_metrics
         }
 
+        update_leaderboard(results)
         output_file = "evaluation_results.txt"
         write_evaluation_results(results, output_file)
-        return "Evaluation completed successfully!", output_file
+        return "Evaluation completed successfully! Leaderboard updated.", output_file
 
     except Exception as e:
         return f"Error during evaluation: {str(e)}", None
 
 # Gradio Interface
-description = "Upload a prediction CSV file to evaluate predictions against the ground truth
-
-demo = gr.
-
-
-
-
-
-gr.Textbox(label="Evaluation Status")
-gr.File(label="Download Evaluation Results")
-
-
-
-)
+description = "Upload a prediction CSV file to evaluate predictions against the ground truth and update the leaderboard."
+
+demo = gr.Blocks()
+
+with demo:
+    gr.Markdown("# Prediction Evaluation Tool with Leaderboard")
+    with gr.Tab("Evaluate"):
+        file_input = gr.File(label="Upload Prediction CSV")
+        eval_status = gr.Textbox(label="Evaluation Status")
+        eval_results_file = gr.File(label="Download Evaluation Results")
+        eval_button = gr.Button("Evaluate")
+        eval_button.click(
+            evaluate_predictions, inputs=file_input, outputs=[eval_status, eval_results_file]
+        )
+    with gr.Tab("Leaderboard"):
+        leaderboard_text = gr.Textbox(label="Leaderboard", interactive=False)
+        refresh_button = gr.Button("Refresh Leaderboard")
+        refresh_button.click(display_leaderboard, outputs=leaderboard_text)
 
 if __name__ == "__main__":
     demo.launch()
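One small design note on the new UI: `display_leaderboard` returns a markdown table, but it is wired into a `gr.Textbox`, so users will see the raw pipe characters rather than a rendered table. If formatted output is preferred, a `gr.Markdown` component accepts the same string; a self-contained sketch of that variant (not part of this commit):

```python
# Variant sketch, not in the diff: render the leaderboard as formatted
# markdown by swapping gr.Textbox for gr.Markdown. A stub callback is
# included so the snippet runs on its own; assumes gradio is installed.
import gradio as gr

def show_board():
    # Stand-in for display_leaderboard(); returns an illustrative table.
    return "| Model Name | Overall Accuracy |\n|---|---|\n| demo-model | 81.25% |"

with gr.Blocks() as variant:
    board = gr.Markdown("Leaderboard is empty.")
    gr.Button("Refresh Leaderboard").click(show_board, outputs=board)

if __name__ == "__main__":
    variant.launch()
```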
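Finally, the full flow can be smoke-tested without starting the UI, assuming `evaluate_predictions` accepts a plain file path (the code that actually reads the upload sits outside this diff) and a `ground_truth.csv` is present in the working directory:

```python
# Local smoke test with hypothetical paths: run one evaluation end-to-end
# and confirm the leaderboard picked up the new row. Requires ground_truth.csv
# and a predictions CSV alongside app.py.
from app import evaluate_predictions, display_leaderboard

status, results_path = evaluate_predictions("sample_predictions.csv")
print(status)        # expected: "Evaluation completed successfully! Leaderboard updated."
print(results_path)  # expected: "evaluation_results.txt"
print(display_leaderboard())
```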