sudoping01 committed (verified) · commit d4aa692 · 1 parent: 74d890a

Update app.py

Files changed (1):
  1. app.py +59 -162
app.py CHANGED
@@ -40,17 +40,22 @@ except Exception as e:
# Initialize leaderboard file
leaderboard_file = "leaderboard.csv"
if not os.path.exists(leaderboard_file):
-    pd.DataFrame(columns=["submitter", "WER", "CER", "weighted_WER", "weighted_CER", "samples_evaluated", "timestamp"]).to_csv(leaderboard_file, index=False)
+    pd.DataFrame(columns=["submitter", "WER", "CER", "timestamp"]).to_csv(leaderboard_file, index=False)
else:
    print(f"Loaded existing leaderboard with {len(pd.read_csv(leaderboard_file))} entries")

def normalize_text(text):
    """
-    Normalize text by converting to lowercase, removing punctuation, and normalizing whitespace.
+    Normalize text for WER/CER calculation:
+    - Convert to lowercase
+    - Remove punctuation
+    - Replace multiple spaces with single space
+    - Strip leading/trailing spaces
    """
    if not isinstance(text, str):
        text = str(text)

+    # Convert to lowercase
    text = text.lower()

    # Remove punctuation, keeping spaces
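The hunk above spells out the normalization steps in the new docstring, but the punctuation and whitespace handling itself sits outside the hunk. A minimal sketch of that kind of normalization (the regexes here are an assumption, not copied from app.py):

```python
# Illustrative normalization following the steps listed in the new docstring.
# The exact patterns used by app.py are not shown in this hunk.
import re

def normalize_text_sketch(text) -> str:
    text = str(text).lower()                   # lowercase, tolerate non-string input
    text = re.sub(r"[^\w\s]", "", text)        # drop punctuation, keep letters/digits/spaces
    return re.sub(r"\s+", " ", text).strip()   # collapse whitespace, trim the ends

print(normalize_text_sketch("I ni ce!  Aw ni baara."))  # -> "i ni ce aw ni baara"
```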
@@ -62,15 +67,11 @@ def normalize_text(text):
    return text

def calculate_metrics(predictions_df):
-    """
-    Calculate WER and CER for each sample and return averages and per-sample results.
-    Uses both standard average and length-weighted average.
-    """
-    per_sample_metrics = []
+    """Calculate WER and CER for predictions."""
+    results = []
    total_ref_words = 0
    total_ref_chars = 0

-    # Process each sample
    for _, row in predictions_df.iterrows():
        id_val = row["id"]
        if id_val not in references:
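For readers following the diff, calculate_metrics walks a predictions DataFrame and looks each row's id up in the module-level references mapping (id -> ground-truth transcript). A hedged sketch of the input shapes it expects, with invented values:

```python
# Invented data, only to show the shapes calculate_metrics works with.
import pandas as pd

references = {                        # assumed shape: id -> reference transcript
    "audio_001": "n ye foro ka taa",
    "audio_002": "i ni ce",
}
predictions_df = pd.DataFrame({       # the "id" and "text" columns the app validates
    "id": ["audio_001", "audio_002"],
    "text": ["n ye foro taa", "i ni ce"],
})
# calculate_metrics(predictions_df) iterates these rows and scores each "text"
# against references[id_val], skipping ids that are missing from references.
```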
@@ -80,25 +81,27 @@ def calculate_metrics(predictions_df):
        reference = normalize_text(references[id_val])
        hypothesis = normalize_text(row["text"])

+        # Print detailed info for first few entries
+        if len(results) < 5:
+            print(f"ID: {id_val}")
+            print(f"Reference: '{reference}'")
+            print(f"Hypothesis: '{hypothesis}'")
+
+        # Skip empty strings
        if not reference or not hypothesis:
            print(f"Warning: Empty reference or hypothesis for ID {id_val}")
            continue

+        # Split into words for jiwer
        reference_words = reference.split()
+        hypothesis_words = hypothesis.split()
        reference_chars = list(reference)

-        # Skip very short references for more stable metrics
-        if len(reference_words) < 2:
-            print(f"Warning: Reference too short for ID {id_val}, skipping")
-            continue
-
-        # Store sample info for debugging (first few samples)
-        if len(per_sample_metrics) < 5:
-            print(f"ID: {id_val}")
-            print(f"Reference: '{reference}'")
-            print(f"Hypothesis: '{hypothesis}'")
+        if len(results) < 5:
            print(f"Reference words: {reference_words}")
+            print(f"Hypothesis words: {hypothesis_words}")

+        # Calculate metrics
        try:
            # Calculate WER and CER
            sample_wer = wer(reference, hypothesis)
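The per-sample scores come from jiwer's wer() and cer(), called on the normalized strings. A small standalone example with invented sentences (the numbers in the comments are approximate):

```python
# Standalone jiwer example; the sentences are invented, not from the dataset.
from jiwer import cer, wer

reference = "n ye foro ka taa"   # 5 reference words
hypothesis = "n ye foro taa"     # one word dropped

print(wer(reference, hypothesis))  # 0.2  -> 1 deletion / 5 reference words
print(cer(reference, hypothesis))  # ~0.19 -> 3 deleted characters / 16 reference characters
```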
@@ -112,10 +115,10 @@
            total_ref_words += len(reference_words)
            total_ref_chars += len(reference_chars)

-            if len(per_sample_metrics) < 5:
+            if len(results) < 5:
                print(f"WER: {sample_wer}, CER: {sample_cer}")
-
-            per_sample_metrics.append({
+
+            results.append({
                "id": id_val,
                "reference": reference,
                "hypothesis": hypothesis,
@@ -127,91 +130,50 @@ def calculate_metrics(predictions_df):
        except Exception as e:
            print(f"Error calculating metrics for ID {id_val}: {str(e)}")

-    if not per_sample_metrics:
+    if not results:
        raise ValueError("No valid samples for WER/CER calculation")
-
+
    # Calculate standard average metrics
-    avg_wer = sum(item["wer"] for item in per_sample_metrics) / len(per_sample_metrics)
-    avg_cer = sum(item["cer"] for item in per_sample_metrics) / len(per_sample_metrics)
+    avg_wer = sum(item["wer"] for item in results) / len(results)
+    avg_cer = sum(item["cer"] for item in results) / len(results)

    # Calculate weighted average metrics based on reference length
-    weighted_wer = sum(item["wer"] * item["ref_word_count"] for item in per_sample_metrics) / total_ref_words
-    weighted_cer = sum(item["cer"] * item["ref_char_count"] for item in per_sample_metrics) / total_ref_chars
+    weighted_wer = sum(item["wer"] * item["ref_word_count"] for item in results) / total_ref_words
+    weighted_cer = sum(item["cer"] * item["ref_char_count"] for item in results) / total_ref_chars

    print(f"Simple average WER: {avg_wer:.4f}, CER: {avg_cer:.4f}")
    print(f"Weighted average WER: {weighted_wer:.4f}, CER: {weighted_cer:.4f}")
-    print(f"Processed {len(per_sample_metrics)} valid samples")
+    print(f"Processed {len(results)} valid samples")

-    return avg_wer, avg_cer, weighted_wer, weighted_cer, per_sample_metrics
-
-def styled_error(message):
-    """Format error messages with red styling"""
-    return f"<div style='color: red; font-weight: bold; padding: 10px; border-radius: 5px; background-color: #ffe0e0;'>{message}</div>"
-
-def styled_success(message):
-    """Format success messages with green styling"""
-    return f"<div style='color: green; font-weight: bold; padding: 10px; border-radius: 5px; background-color: #e0ffe0;'>{message}</div>"
-
-def styled_info(message):
-    """Format informational messages with blue styling"""
-    return f"<div style='color: #004080; padding: 10px; border-radius: 5px; background-color: #e0f0ff;'>{message}</div>"
+    return avg_wer, avg_cer, weighted_wer, weighted_cer, results

def process_submission(submitter_name, csv_file):
-    """
-    Process a submission CSV, calculate metrics, and update the leaderboard.
-    Returns a status message and updated leaderboard.
-    """
    try:
-        # Validate submitter name
-        if not submitter_name or len(submitter_name.strip()) < 3:
-            return styled_error("Please provide a valid submitter name (at least 3 characters)"), None
-
        # Read and validate the uploaded CSV
        df = pd.read_csv(csv_file)
        print(f"Processing submission from {submitter_name} with {len(df)} rows")

-        # Basic validation
        if len(df) == 0:
-            return styled_error("Error: Uploaded CSV is empty."), None
-
-        if len(df) < 10:
-            return styled_error("Error: Submission contains too few samples (minimum 10 required)."), None
+            return "Error: Uploaded CSV is empty.", None

        if set(df.columns) != {"id", "text"}:
-            return styled_error(f"Error: CSV must contain exactly 'id' and 'text' columns. Found: {', '.join(df.columns)}"), None
+            return f"Error: CSV must contain exactly 'id' and 'text' columns. Found: {', '.join(df.columns)}", None

        if df["id"].duplicated().any():
            dup_ids = df[df["id"].duplicated()]["id"].unique()
-            return styled_error(f"Error: Duplicate IDs found: {', '.join(map(str, dup_ids[:5]))}."), None
-
-        # Ensure text column contains strings
-        df["text"] = df["text"].astype(str)
-
-        # Check for valid references
-        if not references:
-            return styled_error("Error: Reference dataset could not be loaded. Please try again later."), None
+            return f"Error: Duplicate IDs found: {', '.join(map(str, dup_ids[:5]))}", None

        # Check if IDs match the reference dataset
        missing_ids = set(references.keys()) - set(df["id"])
        extra_ids = set(df["id"]) - set(references.keys())

        if missing_ids:
-            return styled_error(f"Error: Missing {len(missing_ids)} IDs in submission. First few missing: {', '.join(map(str, list(missing_ids)[:5]))}."), None
+            return f"Error: Missing {len(missing_ids)} IDs in submission. First few missing: {', '.join(map(str, list(missing_ids)[:5]))}", None

        if extra_ids:
-            return styled_error(f"Error: Found {len(extra_ids)} extra IDs not in reference dataset. First few extra: {', '.join(map(str, list(extra_ids)[:5]))}."), None
+            return f"Error: Found {len(extra_ids)} extra IDs not in reference dataset. First few extra: {', '.join(map(str, list(extra_ids)[:5]))}", None

-        # Check for suspicious submissions (high percentage of exact matches)
-        exact_matches = 0
-        for _, row in df.iterrows():
-            if normalize_text(row["text"]) == normalize_text(references[row["id"]]):
-                exact_matches += 1
-
-        exact_match_ratio = exact_matches / len(df)
-        if exact_match_ratio > 0.95:  # If 95% exact matches, likely copying reference
-            return styled_error("Suspicious submission: Too many exact matches with reference texts."), None
-
-        # Calculate metrics
+        # Calculate WER and CER
        try:
            avg_wer, avg_cer, weighted_wer, weighted_cer, detailed_results = calculate_metrics(df)

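The metric code kept by this hunk reports two averages: a simple mean over samples and a length-weighted mean in which each sample counts in proportion to its reference word (or character) count. A tiny worked example with invented numbers:

```python
# Invented per-sample results, only to contrast simple vs. length-weighted averaging.
samples = [
    {"wer": 0.50, "ref_word_count": 2},   # short clip, high WER
    {"wer": 0.10, "ref_word_count": 18},  # long clip, low WER
]

simple = sum(s["wer"] for s in samples) / len(samples)
weighted = (sum(s["wer"] * s["ref_word_count"] for s in samples)
            / sum(s["ref_word_count"] for s in samples))

print(f"simple={simple:.3f}, weighted={weighted:.3f}")  # simple=0.300, weighted=0.140
```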
@@ -221,119 +183,54 @@ def process_submission(submitter_name, csv_file):
            print(f"Processed {len(detailed_results)} valid samples")

            # Check for suspiciously low values
-            if avg_wer < 0.001 or weighted_wer < 0.001:
+            if avg_wer < 0.001:
                print("WARNING: WER is extremely low - likely an error")
-                return styled_error("Error: WER calculation yielded suspicious results (near-zero). Please check your submission CSV."), None
+                return "Error: WER calculation yielded suspicious results (near-zero). Please check your submission CSV.", None

        except Exception as e:
            print(f"Error in metrics calculation: {str(e)}")
-            return styled_error(f"Error calculating metrics: {str(e)}"), None
+            return f"Error calculating metrics: {str(e)}", None

        # Update the leaderboard
        leaderboard = pd.read_csv(leaderboard_file)
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-
        new_entry = pd.DataFrame(
-            [[submitter_name, avg_wer, avg_cer, weighted_wer, weighted_cer, len(detailed_results), timestamp]],
-            columns=["submitter", "WER", "CER", "weighted_WER", "weighted_CER", "samples_evaluated", "timestamp"]
+            [[submitter_name, avg_wer, avg_cer, timestamp]],
+            columns=["submitter", "WER", "CER", "timestamp"]
        )
+        leaderboard = pd.concat([leaderboard, new_entry]).sort_values("WER")
+        leaderboard.to_csv(leaderboard_file, index=False)

-        # Combine with existing leaderboard and keep only the best submission per submitter
-        combined = pd.concat([leaderboard, new_entry])
-        # Sort by WER (ascending) and get first entry for each submitter
-        best_entries = combined.sort_values("WER").groupby("submitter").first().reset_index()
-        # Sort the resulting dataframe by WER
-        updated_leaderboard = best_entries.sort_values("WER")
-        updated_leaderboard.to_csv(leaderboard_file, index=False)
-
-        # Create detailed metrics summary
-        metrics_summary = f"""
-        <h3>Submission Results</h3>
-        <table>
-            <tr><td><b>Submitter:</b></td><td>{submitter_name}</td></tr>
-            <tr><td><b>Word Error Rate (WER):</b></td><td>{avg_wer:.4f}</td></tr>
-            <tr><td><b>Character Error Rate (CER):</b></td><td>{avg_cer:.4f}</td></tr>
-            <tr><td><b>Weighted WER:</b></td><td>{weighted_wer:.4f}</td></tr>
-            <tr><td><b>Weighted CER:</b></td><td>{weighted_cer:.4f}</td></tr>
-            <tr><td><b>Samples Evaluated:</b></td><td>{len(detailed_results)}</td></tr>
-            <tr><td><b>Submission Time:</b></td><td>{timestamp}</td></tr>
-        </table>
-        """
-
-        return styled_success(f"Submission processed successfully!") + styled_info(metrics_summary), updated_leaderboard
+        return f"Submission processed successfully! WER: {avg_wer:.4f}, CER: {avg_cer:.4f}", leaderboard

    except Exception as e:
        print(f"Error processing submission: {str(e)}")
-        return styled_error(f"Error processing submission: {str(e)}"), None
+        return f"Error processing submission: {str(e)}", None

# Create the Gradio interface
with gr.Blocks(title="Bambara ASR Leaderboard") as demo:
    gr.Markdown(
        """
        # Bambara ASR Leaderboard
-
        Upload a CSV file with 'id' and 'text' columns to evaluate your ASR predictions.
        The 'id's must match those in the dataset.

-        ## Metrics
-        - **WER**: Word Error Rate (lower is better) - measures word-level accuracy
-        - **CER**: Character Error Rate (lower is better) - measures character-level accuracy
-
-        We report both standard averages and length-weighted averages (where longer samples have more influence on the final score).
+        - **WER**: Word Error Rate (lower is better).
+        - **CER**: Character Error Rate (lower is better).
        """
    )

    with gr.Row():
-        with gr.Column(scale=1):
-            submitter = gr.Textbox(
-                label="Submitter Name or Model Name",
-                placeholder="e.g., MALIBA-AI/asr"
-            )
-            csv_upload = gr.File(
-                label="Upload CSV File",
-                file_types=[".csv"]
-            )
-            submit_btn = gr.Button("Submit", variant="primary")
-
-        with gr.Column(scale=2):
-            with gr.Accordion("Submission Format", open=False):
-                gr.Markdown(
-                    """
-                    ### CSV Format Requirements
-
-                    Your CSV file must:
-                    - Have exactly two columns: `id` and `text`
-                    - The `id` column must match the IDs in the reference dataset
-                    - The `text` column should contain your model's transcriptions
-
-                    Example:
-                    ```
-                    id,text
-                    audio_001,n ye foro ka taa
-                    audio_002,i ni ce
-                    ```
-
-                    ### Evaluation Process
-
-                    Your submissions are evaluated by:
-                    1. Normalizing both reference and predicted text (lowercase, punctuation removal)
-                    2. Calculating Word Error Rate (WER) and Character Error Rate (CER)
-                    3. Computing both simple average and length-weighted average
-                    4. Ranking on the leaderboard by WER (lower is better)
-
-                    Only your best submission is kept on the leaderboard.
-                    """
-                )
-
-    output_msg = gr.HTML(label="Status")
-
-    # Leaderboard display
-    with gr.Accordion("Leaderboard", open=True):
-        leaderboard_display = gr.DataFrame(
-            label="Current Standings",
-            value=pd.read_csv(leaderboard_file),
-            interactive=False
-        )
+        submitter = gr.Textbox(label="Submitter Name or Model Name", placeholder="e.g., MALIBA-AI/asr")
+        csv_upload = gr.File(label="Upload CSV File", file_types=[".csv"])
+
+    submit_btn = gr.Button("Submit")
+    output_msg = gr.Textbox(label="Status", interactive=False)
+    leaderboard_display = gr.DataFrame(
+        label="Leaderboard",
+        value=pd.read_csv(leaderboard_file),
+        interactive=False
+    )

    submit_btn.click(
        fn=process_submission,
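The simplified interface in the hunk above wires five components directly under the Blocks context. A minimal, self-contained sketch along the same lines; the click() inputs/outputs are cut off in this hunk, so the wiring and the stub callback below are assumptions rather than the app's code:

```python
# Minimal Gradio Blocks sketch mirroring the simplified layout. The callback is
# a stub and the inputs/outputs wiring is assumed, not taken from app.py.
import gradio as gr
import pandas as pd

def fake_submit(name, csv_file):
    return f"Received submission from {name}", pd.DataFrame({"submitter": [name], "WER": [0.5]})

with gr.Blocks(title="Bambara ASR Leaderboard (sketch)") as demo:
    with gr.Row():
        submitter = gr.Textbox(label="Submitter Name or Model Name", placeholder="e.g., MALIBA-AI/asr")
        csv_upload = gr.File(label="Upload CSV File", file_types=[".csv"])

    submit_btn = gr.Button("Submit")
    output_msg = gr.Textbox(label="Status", interactive=False)
    leaderboard_display = gr.DataFrame(label="Leaderboard", interactive=False)

    submit_btn.click(
        fn=fake_submit,
        inputs=[submitter, csv_upload],
        outputs=[output_msg, leaderboard_display],
    )

# demo.launch()  # uncomment to serve locally
```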
 