sudoping01 committed
Commit f81f1e2 · verified · 1 parent: 9174c46

Update app.py


Redefine the metric calculation based on the Open ASR Leaderboard package
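The gist of the change: per-sample WER/CER still come from jiwer, but the app now reports a length-weighted aggregate alongside the plain mean. A minimal sketch of that aggregation, assuming jiwer is installed (the sample pairs below are illustrative placeholders, not benchmark data):

```python
# Minimal sketch of simple vs. length-weighted WER/CER aggregation.
# The (reference, hypothesis) pairs are made up for illustration.
from jiwer import wer, cer

samples = [
    ("i ni ce", "i ni ce"),
    ("n ye foro ka taa", "n ye foro taa"),
]

per_sample = []
for ref, hyp in samples:
    per_sample.append({
        "wer": wer(ref, hyp),
        "cer": cer(ref, hyp),
        "ref_words": len(ref.split()),
        "ref_chars": len(ref),
    })

# Simple averages: every sample counts equally, however short it is.
avg_wer = sum(s["wer"] for s in per_sample) / len(per_sample)
avg_cer = sum(s["cer"] for s in per_sample) / len(per_sample)

# Length-weighted averages: each sample contributes in proportion to its
# reference length, so longer utterances carry more weight.
weighted_wer = sum(s["wer"] * s["ref_words"] for s in per_sample) / sum(s["ref_words"] for s in per_sample)
weighted_cer = sum(s["cer"] * s["ref_chars"] for s in per_sample) / sum(s["ref_chars"] for s in per_sample)

print(f"simple WER {avg_wer:.4f} / weighted WER {weighted_wer:.4f}")
print(f"simple CER {avg_cer:.4f} / weighted CER {weighted_cer:.4f}")
```

Up to the per-sample capping the new code applies, the weighted form is the usual corpus-level WER: total errors divided by total reference words, so short utterances don't dominate the score.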

Files changed (1)
  1. app.py +205 -76
app.py CHANGED
@@ -8,46 +8,59 @@ import re
 
 from huggingface_hub import login
 
 token = os.environ.get("HG_TOKEN")
-
 login(token)
 
 print("Loading dataset...")
-dataset = load_dataset("sudoping01/bambara-speech-recognition-benchmark", name="default")["eval"]
-references = {row["id"]: row["text"] for row in dataset}
 
 leaderboard_file = "leaderboard.csv"
 if not os.path.exists(leaderboard_file):
-    pd.DataFrame(columns=["submitter", "WER", "CER", "timestamp"]).to_csv(leaderboard_file, index=False)
 else:
     print(f"Loaded existing leaderboard with {len(pd.read_csv(leaderboard_file))} entries")
 
 def normalize_text(text):
     """
-    Normalize text for WER/CER calculation:
-    - Convert to lowercase
-    - Remove punctuation
-    - Replace multiple spaces with single space
-    - Strip leading/trailing spaces
     """
     if not isinstance(text, str):
         text = str(text)
 
-    # Convert to lowercase
     text = text.lower()
 
-    # # Remove punctuation, keeping spaces
-    # text = re.sub(r'[^\w\s]', '', text)
 
-    # # Normalize whitespace
-    # text = re.sub(r'\s+', ' ', text).strip()
 
     return text
 
 def calculate_metrics(predictions_df):
-    """Calculate WER and CER for predictions."""
-    results = []
 
     for _, row in predictions_df.iterrows():
         id_val = row["id"]
         if id_val not in references:
@@ -57,146 +70,262 @@ def calculate_metrics(predictions_df):
         reference = normalize_text(references[id_val])
         hypothesis = normalize_text(row["text"])
 
-        # Print detailed info for first few entries
-        if len(results) < 5:
-            print(f"ID: {id_val}")
-            print(f"Reference: '{reference}'")
-            print(f"Hypothesis: '{hypothesis}'")
-
-        # Skip empty strings
         if not reference or not hypothesis:
             print(f"Warning: Empty reference or hypothesis for ID {id_val}")
             continue
 
-        # Split into words for jiwer
         reference_words = reference.split()
-        hypothesis_words = hypothesis.split()
 
-        if len(results) < 5:
             print(f"Reference words: {reference_words}")
-            print(f"Hypothesis words: {hypothesis_words}")
 
-        # Calculate metrics
         try:
-            # Make sure we're not comparing identical strings
-            if reference == hypothesis:
-                print(f"Warning: Identical strings for ID {id_val}")
-                # Force a small difference if the strings are identical
-                # This is for debugging - remove in production if needed
-                if len(hypothesis_words) > 0:
-                    # Add a dummy word to force non-zero WER
-                    hypothesis_words.append("dummy_debug_token")
-                    hypothesis = " ".join(hypothesis_words)
-
             # Calculate WER and CER
             sample_wer = wer(reference, hypothesis)
             sample_cer = cer(reference, hypothesis)
 
-            if len(results) < 5:
                 print(f"WER: {sample_wer}, CER: {sample_cer}")
-
-            results.append({
                 "id": id_val,
                 "reference": reference,
                 "hypothesis": hypothesis,
                 "wer": sample_wer,
                 "cer": sample_cer
             })
         except Exception as e:
             print(f"Error calculating metrics for ID {id_val}: {str(e)}")
 
-    if not results:
         raise ValueError("No valid samples for WER/CER calculation")
-
-    # Calculate average metrics
-    avg_wer = sum(item["wer"] for item in results) / len(results)
-    avg_cer = sum(item["cer"] for item in results) / len(results)
 
-    return avg_wer, avg_cer, results
 
 def process_submission(submitter_name, csv_file):
     try:
         # Read and validate the uploaded CSV
         df = pd.read_csv(csv_file)
         print(f"Processing submission from {submitter_name} with {len(df)} rows")
 
         if len(df) == 0:
-            return "Error: Uploaded CSV is empty.", None
 
         if set(df.columns) != {"id", "text"}:
-            return f"Error: CSV must contain exactly 'id' and 'text' columns. Found: {', '.join(df.columns)}", None
 
         if df["id"].duplicated().any():
             dup_ids = df[df["id"].duplicated()]["id"].unique()
-            return f"Error: Duplicate IDs found: {', '.join(map(str, dup_ids[:5]))}", None
 
         # Check if IDs match the reference dataset
         missing_ids = set(references.keys()) - set(df["id"])
         extra_ids = set(df["id"]) - set(references.keys())
 
         if missing_ids:
-            return f"Error: Missing {len(missing_ids)} IDs in submission. First few missing: {', '.join(map(str, list(missing_ids)[:5]))}", None
 
         if extra_ids:
-            return f"Error: Found {len(extra_ids)} extra IDs not in reference dataset. First few extra: {', '.join(map(str, list(extra_ids)[:5]))}", None
 
-        # Calculate WER and CER
         try:
-            avg_wer, avg_cer, detailed_results = calculate_metrics(df)
 
             # Debug information
             print(f"Calculated metrics - WER: {avg_wer:.4f}, CER: {avg_cer:.4f}")
             print(f"Processed {len(detailed_results)} valid samples")
 
             # Check for suspiciously low values
-            if avg_wer < 0.001:
                 print("WARNING: WER is extremely low - likely an error")
-                return "Error: WER calculation yielded suspicious results (near-zero). Please check your submission CSV.", None
 
         except Exception as e:
             print(f"Error in metrics calculation: {str(e)}")
-            return f"Error calculating metrics: {str(e)}", None
 
         # Update the leaderboard
         leaderboard = pd.read_csv(leaderboard_file)
         timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
         new_entry = pd.DataFrame(
-            [[submitter_name, avg_wer, avg_cer, timestamp]],
-            columns=["submitter", "WER", "CER", "timestamp"]
         )
-        leaderboard = pd.concat([leaderboard, new_entry]).sort_values("WER")
-        leaderboard.to_csv(leaderboard_file, index=False)
 
-        return f"Submission processed successfully! WER: {avg_wer:.4f}, CER: {avg_cer:.4f}", leaderboard
 
     except Exception as e:
         print(f"Error processing submission: {str(e)}")
-        return f"Error processing submission: {str(e)}", None
 
 # Create the Gradio interface
 with gr.Blocks(title="Bambara ASR Leaderboard") as demo:
     gr.Markdown(
         """
         # Bambara ASR Leaderboard
         Upload a CSV file with 'id' and 'text' columns to evaluate your ASR predictions.
         The 'id's must match those in the dataset.
-        [View the dataset here](https://huggingface.co/datasets/MALIBA-AI/bambara_general_leaderboard_dataset).
-        - **WER**: Word Error Rate (lower is better).
-        - **CER**: Character Error Rate (lower is better).
         """
     )
 
     with gr.Row():
-        submitter = gr.Textbox(label="Submitter Name or Model Name", placeholder="e.g., MALIBA-AI/asr")
-        csv_upload = gr.File(label="Upload CSV File", file_types=[".csv"])
-
-    submit_btn = gr.Button("Submit")
-    output_msg = gr.Textbox(label="Status", interactive=False)
-    leaderboard_display = gr.DataFrame(
-        label="Leaderboard",
-        value=pd.read_csv(leaderboard_file),
-        interactive=False
-    )
 
     submit_btn.click(
         fn=process_submission,
 
 
 from huggingface_hub import login
 
+# Authentication setup
 token = os.environ.get("HG_TOKEN")
+print(f"Token exists: {token is not None}")
+if token:
+    print(f"Token length: {len(token)}")
+    print(f"Token first few chars: {token[:4]}...")
 login(token)
 
 print("Loading dataset...")
+try:
+    dataset = load_dataset("sudoping01/bambara-speech-recognition-benchmark", name="default", use_auth_token=token)["eval"]
+    print(f"Successfully loaded dataset with {len(dataset)} samples")
+    references = {row["id"]: row["text"] for row in dataset}
+except Exception as e:
+    print(f"Error loading dataset: {str(e)}")
+    # Fallback in case dataset can't be loaded
+    references = {}
+    print("WARNING: Using empty references dictionary due to dataset loading error")
 
+# Initialize leaderboard file
 leaderboard_file = "leaderboard.csv"
 if not os.path.exists(leaderboard_file):
+    pd.DataFrame(columns=["submitter", "WER", "CER", "weighted_WER", "weighted_CER", "samples_evaluated", "timestamp"]).to_csv(leaderboard_file, index=False)
 else:
     print(f"Loaded existing leaderboard with {len(pd.read_csv(leaderboard_file))} entries")
 
 def normalize_text(text):
     """
+    Normalize text by converting to lowercase, removing punctuation, and normalizing whitespace.
     """
     if not isinstance(text, str):
         text = str(text)
 
     text = text.lower()
 
+    # Remove punctuation, keeping spaces
+    text = re.sub(r'[^\w\s]', '', text)
 
+    # Normalize whitespace
+    text = re.sub(r'\s+', ' ', text).strip()
 
     return text
 
 def calculate_metrics(predictions_df):
+    """
+    Calculate WER and CER for each sample and return averages and per-sample results.
+    Uses both standard average and length-weighted average.
+    """
+    per_sample_metrics = []
+    total_ref_words = 0
+    total_ref_chars = 0
 
+    # Process each sample
     for _, row in predictions_df.iterrows():
         id_val = row["id"]
         if id_val not in references:
 
         reference = normalize_text(references[id_val])
         hypothesis = normalize_text(row["text"])
 
         if not reference or not hypothesis:
             print(f"Warning: Empty reference or hypothesis for ID {id_val}")
             continue
 
         reference_words = reference.split()
+        reference_chars = list(reference)
+
+        # Skip very short references for more stable metrics
+        if len(reference_words) < 2:
+            print(f"Warning: Reference too short for ID {id_val}, skipping")
+            continue
 
+        # Store sample info for debugging (first few samples)
+        if len(per_sample_metrics) < 5:
+            print(f"ID: {id_val}")
+            print(f"Reference: '{reference}'")
+            print(f"Hypothesis: '{hypothesis}'")
             print(f"Reference words: {reference_words}")
 
         try:
             # Calculate WER and CER
             sample_wer = wer(reference, hypothesis)
             sample_cer = cer(reference, hypothesis)
 
+            # Cap metrics at sensible values to prevent outliers
+            sample_wer = min(sample_wer, 2.0)  # Cap at 200% WER
+            sample_cer = min(sample_cer, 2.0)  # Cap at 200% CER
+
+            # For weighted calculations
+            total_ref_words += len(reference_words)
+            total_ref_chars += len(reference_chars)
+
+            if len(per_sample_metrics) < 5:
                 print(f"WER: {sample_wer}, CER: {sample_cer}")
+
+            per_sample_metrics.append({
                 "id": id_val,
                 "reference": reference,
                 "hypothesis": hypothesis,
+                "ref_word_count": len(reference_words),
+                "ref_char_count": len(reference_chars),
                 "wer": sample_wer,
                 "cer": sample_cer
             })
         except Exception as e:
             print(f"Error calculating metrics for ID {id_val}: {str(e)}")
 
+    if not per_sample_metrics:
         raise ValueError("No valid samples for WER/CER calculation")
 
+    # Calculate standard average metrics
+    avg_wer = sum(item["wer"] for item in per_sample_metrics) / len(per_sample_metrics)
+    avg_cer = sum(item["cer"] for item in per_sample_metrics) / len(per_sample_metrics)
+
+    # Calculate weighted average metrics based on reference length
+    weighted_wer = sum(item["wer"] * item["ref_word_count"] for item in per_sample_metrics) / total_ref_words
+    weighted_cer = sum(item["cer"] * item["ref_char_count"] for item in per_sample_metrics) / total_ref_chars
+
+    print(f"Simple average WER: {avg_wer:.4f}, CER: {avg_cer:.4f}")
+    print(f"Weighted average WER: {weighted_wer:.4f}, CER: {weighted_cer:.4f}")
+    print(f"Processed {len(per_sample_metrics)} valid samples")
+
+    return avg_wer, avg_cer, weighted_wer, weighted_cer, per_sample_metrics
+
+def styled_error(message):
+    """Format error messages with red styling"""
+    return f"<div style='color: red; font-weight: bold; padding: 10px; border-radius: 5px; background-color: #ffe0e0;'>{message}</div>"
+
+def styled_success(message):
+    """Format success messages with green styling"""
+    return f"<div style='color: green; font-weight: bold; padding: 10px; border-radius: 5px; background-color: #e0ffe0;'>{message}</div>"
+
+def styled_info(message):
+    """Format informational messages with blue styling"""
+    return f"<div style='color: #004080; padding: 10px; border-radius: 5px; background-color: #e0f0ff;'>{message}</div>"
 
 def process_submission(submitter_name, csv_file):
+    """
+    Process a submission CSV, calculate metrics, and update the leaderboard.
+    Returns a status message and updated leaderboard.
+    """
     try:
+        # Validate submitter name
+        if not submitter_name or len(submitter_name.strip()) < 3:
+            return styled_error("Please provide a valid submitter name (at least 3 characters)"), None
+
         # Read and validate the uploaded CSV
         df = pd.read_csv(csv_file)
         print(f"Processing submission from {submitter_name} with {len(df)} rows")
 
+        # Basic validation
         if len(df) == 0:
+            return styled_error("Error: Uploaded CSV is empty."), None
+
+        if len(df) < 10:
+            return styled_error("Error: Submission contains too few samples (minimum 10 required)."), None
 
         if set(df.columns) != {"id", "text"}:
+            return styled_error(f"Error: CSV must contain exactly 'id' and 'text' columns. Found: {', '.join(df.columns)}"), None
 
         if df["id"].duplicated().any():
             dup_ids = df[df["id"].duplicated()]["id"].unique()
+            return styled_error(f"Error: Duplicate IDs found: {', '.join(map(str, dup_ids[:5]))}."), None
+
+        # Ensure text column contains strings
+        df["text"] = df["text"].astype(str)
+
+        # Check for valid references
+        if not references:
+            return styled_error("Error: Reference dataset could not be loaded. Please try again later."), None
 
         # Check if IDs match the reference dataset
         missing_ids = set(references.keys()) - set(df["id"])
         extra_ids = set(df["id"]) - set(references.keys())
 
         if missing_ids:
+            return styled_error(f"Error: Missing {len(missing_ids)} IDs in submission. First few missing: {', '.join(map(str, list(missing_ids)[:5]))}."), None
 
         if extra_ids:
+            return styled_error(f"Error: Found {len(extra_ids)} extra IDs not in reference dataset. First few extra: {', '.join(map(str, list(extra_ids)[:5]))}."), None
+
+        # Check for suspicious submissions (high percentage of exact matches)
+        exact_matches = 0
+        for _, row in df.iterrows():
+            if normalize_text(row["text"]) == normalize_text(references[row["id"]]):
+                exact_matches += 1
 
+        exact_match_ratio = exact_matches / len(df)
+        if exact_match_ratio > 0.95:  # If 95% exact matches, likely copying reference
+            return styled_error("Suspicious submission: Too many exact matches with reference texts."), None
+
+        # Calculate metrics
         try:
+            avg_wer, avg_cer, weighted_wer, weighted_cer, detailed_results = calculate_metrics(df)
 
             # Debug information
             print(f"Calculated metrics - WER: {avg_wer:.4f}, CER: {avg_cer:.4f}")
+            print(f"Weighted metrics - WER: {weighted_wer:.4f}, CER: {weighted_cer:.4f}")
             print(f"Processed {len(detailed_results)} valid samples")
 
             # Check for suspiciously low values
+            if avg_wer < 0.001 or weighted_wer < 0.001:
                 print("WARNING: WER is extremely low - likely an error")
+                return styled_error("Error: WER calculation yielded suspicious results (near-zero). Please check your submission CSV."), None
 
         except Exception as e:
             print(f"Error in metrics calculation: {str(e)}")
+            return styled_error(f"Error calculating metrics: {str(e)}"), None
 
         # Update the leaderboard
         leaderboard = pd.read_csv(leaderboard_file)
         timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+
         new_entry = pd.DataFrame(
+            [[submitter_name, avg_wer, avg_cer, weighted_wer, weighted_cer, len(detailed_results), timestamp]],
+            columns=["submitter", "WER", "CER", "weighted_WER", "weighted_CER", "samples_evaluated", "timestamp"]
         )
 
+        # Combine with existing leaderboard and keep only the best submission per submitter
+        combined = pd.concat([leaderboard, new_entry])
+        # Sort by WER (ascending) and get first entry for each submitter
+        best_entries = combined.sort_values("WER").groupby("submitter").first().reset_index()
+        # Sort the resulting dataframe by WER
+        updated_leaderboard = best_entries.sort_values("WER")
+        updated_leaderboard.to_csv(leaderboard_file, index=False)
+
+        # Create detailed metrics summary
+        metrics_summary = f"""
+        <h3>Submission Results</h3>
+        <table>
+            <tr><td><b>Submitter:</b></td><td>{submitter_name}</td></tr>
+            <tr><td><b>Word Error Rate (WER):</b></td><td>{avg_wer:.4f}</td></tr>
+            <tr><td><b>Character Error Rate (CER):</b></td><td>{avg_cer:.4f}</td></tr>
+            <tr><td><b>Weighted WER:</b></td><td>{weighted_wer:.4f}</td></tr>
+            <tr><td><b>Weighted CER:</b></td><td>{weighted_cer:.4f}</td></tr>
+            <tr><td><b>Samples Evaluated:</b></td><td>{len(detailed_results)}</td></tr>
+            <tr><td><b>Submission Time:</b></td><td>{timestamp}</td></tr>
+        </table>
+        """
+
+        return styled_success(f"Submission processed successfully!") + styled_info(metrics_summary), updated_leaderboard
 
     except Exception as e:
         print(f"Error processing submission: {str(e)}")
+        return styled_error(f"Error processing submission: {str(e)}"), None
 
 # Create the Gradio interface
 with gr.Blocks(title="Bambara ASR Leaderboard") as demo:
     gr.Markdown(
         """
         # Bambara ASR Leaderboard
+
         Upload a CSV file with 'id' and 'text' columns to evaluate your ASR predictions.
         The 'id's must match those in the dataset.
+
+        ## Metrics
+        - **WER**: Word Error Rate (lower is better) - measures word-level accuracy
+        - **CER**: Character Error Rate (lower is better) - measures character-level accuracy
+
+        We report both standard averages and length-weighted averages (where longer samples have more influence on the final score).
         """
     )
 
     with gr.Row():
+        with gr.Column(scale=1):
+            submitter = gr.Textbox(
+                label="Submitter Name or Model Name",
+                placeholder="e.g., MALIBA-AI/asr",
+                info="Name to appear on the leaderboard"
+            )
+            csv_upload = gr.File(
+                label="Upload CSV File",
+                file_types=[".csv"],
+                info="CSV must have 'id' and 'text' columns"
+            )
+            submit_btn = gr.Button("Submit", variant="primary")
+
+        with gr.Column(scale=2):
+            with gr.Accordion("Submission Format", open=False):
+                gr.Markdown(
+                    """
+                    ### CSV Format Requirements
+
+                    Your CSV file must:
+                    - Have exactly two columns: `id` and `text`
+                    - The `id` column must match the IDs in the reference dataset
+                    - The `text` column should contain your model's transcriptions
+
+                    Example:
+                    ```
+                    id,text
+                    audio_001,n ye foro ka taa
+                    audio_002,i ni ce
+                    ```
+
+                    ### Evaluation Process
+
+                    Your submissions are evaluated by:
+                    1. Normalizing both reference and predicted text (lowercase, punctuation removal)
+                    2. Calculating Word Error Rate (WER) and Character Error Rate (CER)
+                    3. Computing both simple average and length-weighted average
+                    4. Ranking on the leaderboard by WER (lower is better)
+
+                    Only your best submission is kept on the leaderboard.
+                    """
+                )
+
+    output_msg = gr.HTML(label="Status")
+
+    # Leaderboard display
+    with gr.Accordion("Leaderboard", open=True):
+        leaderboard_display = gr.DataFrame(
+            label="Current Standings",
+            value=pd.read_csv(leaderboard_file),
+            interactive=False
+        )
 
     submit_btn.click(
         fn=process_submission,