sudoping01 committed
Commit 9ba8fab · verified · 1 Parent(s): 2faa5f7

Create app.py

Files changed (1)
  1. app.py +92 -0
app.py ADDED
@@ -0,0 +1,92 @@
import gradio as gr
import pandas as pd
from datasets import load_dataset
from jiwer import wer, cer, transforms
import os
from datetime import datetime


# Text normalization applied to both references and predictions before scoring
transform = transforms.Compose([
    transforms.RemovePunctuation(),
    transforms.ToLowerCase(),
    transforms.RemoveWhiteSpace(replace_by_space=True),
])

# Load the benchmark references, keyed by utterance id
dataset = load_dataset("sudoping01/bambara-asr-benchmark", name="default")["train"]
references = {row["id"]: row["text"] for row in dataset}

# Create the leaderboard file on first run
leaderboard_file = "leaderboard.csv"
if not os.path.exists(leaderboard_file):
    pd.DataFrame(columns=["submitter", "WER", "CER", "timestamp"]).to_csv(leaderboard_file, index=False)


def process_submission(submitter_name, csv_file):
    try:
        # Read and validate the uploaded CSV
        df = pd.read_csv(csv_file)
        if set(df.columns) != {"id", "prediction"}:
            return "Error: CSV must contain exactly 'id' and 'prediction' columns.", None
        if df["id"].duplicated().any():
            return "Error: Duplicate 'id's found in the CSV.", None
        if set(df["id"]) != set(references.keys()):
            return "Error: CSV 'id's must match the dataset 'id's.", None

        # Calculate WER and CER for each prediction:
        # normalize both strings with the transform, then score with jiwer
        wers, cers = [], []
        for _, row in df.iterrows():
            ref = transform(references[row["id"]])
            pred = transform(str(row["prediction"]))
            wers.append(wer(ref, pred))
            cers.append(cer(ref, pred))

        # Compute average WER and CER across all utterances
        avg_wer = sum(wers) / len(wers)
        avg_cer = sum(cers) / len(cers)

        # Update the leaderboard, sorted by ascending WER
        leaderboard = pd.read_csv(leaderboard_file)
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        new_entry = pd.DataFrame(
            [[submitter_name, avg_wer, avg_cer, timestamp]],
            columns=["submitter", "WER", "CER", "timestamp"]
        )
        leaderboard = pd.concat([leaderboard, new_entry]).sort_values("WER")
        leaderboard.to_csv(leaderboard_file, index=False)

        return "Submission processed successfully!", leaderboard
    except Exception as e:
        return f"Error processing submission: {str(e)}", None


# Create the Gradio interface
with gr.Blocks(title="Bambara ASR Leaderboard") as demo:
    gr.Markdown(
        """
        # Bambara ASR Leaderboard
        Upload a CSV file with 'id' and 'prediction' columns to evaluate your ASR predictions.
        The 'id's must match those in the dataset.
        [View the dataset here](https://huggingface.co/datasets/MALIBA-AI/bambara_general_leaderboard_dataset).

        - **WER**: Word Error Rate (lower is better).
        - **CER**: Character Error Rate (lower is better).
        """
    )
    with gr.Row():
        submitter = gr.Textbox(label="Submitter Name or Model Name", placeholder="e.g., MALIBA-AI/asr")
        csv_upload = gr.File(label="Upload CSV File", file_types=[".csv"])
    submit_btn = gr.Button("Submit")
    output_msg = gr.Textbox(label="Status", interactive=False)
    leaderboard_display = gr.DataFrame(
        label="Leaderboard",
        value=pd.read_csv(leaderboard_file),
        interactive=False
    )

    submit_btn.click(
        fn=process_submission,
        inputs=[submitter, csv_upload],
        outputs=[output_msg, leaderboard_display]
    )

demo.launch()
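
For reference, the app expects a submission CSV with exactly the two columns it validates, 'id' and 'prediction'. A minimal sketch of building such a file with pandas is below; the ids and transcriptions shown are placeholders, and a real submission must contain one row for every id in the benchmark dataset.

import pandas as pd

# Hypothetical example submission for the leaderboard app above.
# The ids below are placeholders; replace them with the actual ids from
# the benchmark dataset, and 'prediction' with your model's transcriptions.
submission = pd.DataFrame({
    "id": ["utt_0001", "utt_0002"],               # placeholder ids
    "prediction": ["i ni ce", "aw ni sogoma"],    # placeholder transcriptions
})
submission.to_csv("my_submission.csv", index=False)

Uploading the resulting my_submission.csv through the app's file input then scores it against the references and appends the averaged WER/CER to leaderboard.csv.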