Update for chinese and fix ui (#5)
Commit: 617bfb22d00ff3c86fb7cfabba3c3c382815efd5
Co-authored-by: Hunter S <hunterschep@users.noreply.huggingface.co>
app.py
CHANGED
@@ -12,7 +12,7 @@ import tempfile
 
 # Initialize Firebase
 firebase_config = json.loads(os.environ.get('firebase_creds'))
-cred = credentials.Certificate(firebase_config)
+cred = credentials.Certificate(firebase_config)
 firebase_admin.initialize_app(cred)
 db = firestore.client()
 
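Note on the hunk above: `json.loads(os.environ.get('firebase_creds'))` raises a bare `TypeError` when the `firebase_creds` secret is missing, because `os.environ.get` returns `None`. A minimal defensive sketch, not part of this commit (the error message below is illustrative):

```python
import json
import os

import firebase_admin
from firebase_admin import credentials, firestore

# Sketch only: fail with a clear message when the Space secret is absent,
# instead of letting json.loads(None) raise a TypeError.
raw_creds = os.environ.get("firebase_creds")
if raw_creds is None:
    raise RuntimeError("The 'firebase_creds' secret is not set for this Space.")

cred = credentials.Certificate(json.loads(raw_creds))
firebase_admin.initialize_app(cred)
db = firestore.client()
```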
@@ -21,17 +21,54 @@ MODEL_NAME = "eleferrand/xlsr53_Amis"
 processor = Wav2Vec2Processor.from_pretrained(MODEL_NAME)
 model = AutoModelForCTC.from_pretrained(MODEL_NAME)
 
+# Language configuration
+LANGUAGE = {
+    "en": {
+        "title": "ASR Demo with Editable Transcription",
+        "step1": "Step 1: Audio Upload & Transcription",
+        "audio_input": "Audio Input",
+        "transcribe_btn": "Transcribe Audio",
+        "step2": "Step 2: Review & Edit Transcription",
+        "original_text": "Original Transcription",
+        "corrected_text": "Corrected Transcription",
+        "transcription_placeholder": "Transcription will appear here...",
+        "step3": "Step 3: User Information",
+        "age_label": "Age",
+        "native_speaker": "Native Amis Speaker",
+        "step4": "Step 4: Save & Download",
+        "save_btn": "Save Correction to Database",
+        "save_status": "Save Status",
+        "download_btn": "Download Results (ZIP)",
+        "status_placeholder": "Status messages will appear here...",
+        "toggle_lang": "中文/English"
+    },
+    "zh": {
+        "title": "可編輯轉寫的語音辨識演示",
+        "step1": "步驟一: 音頻上傳與轉寫",
+        "audio_input": "音頻輸入",
+        "transcribe_btn": "開始轉寫",
+        "step2": "步驟二: 校對與編輯轉寫結果",
+        "original_text": "原始轉寫結果",
+        "corrected_text": "校正後文本",
+        "transcription_placeholder": "轉寫結果將顯示在此處...",
+        "step3": "步驟三: 用戶資訊",
+        "age_label": "年齡",
+        "native_speaker": "阿美族母語者",
+        "step4": "步驟四: 保存與下載",
+        "save_btn": "保存校正結果至數據庫",
+        "save_status": "保存狀態",
+        "download_btn": "下載結果(ZIP壓縮檔)",
+        "status_placeholder": "狀態訊息將顯示在此處...",
+        "toggle_lang": "English/中文"
+    }
+}
+
+current_lang = gr.State(value="en")
+
 def transcribe(audio_file):
-    """
-    Transcribes the audio file using the loaded ASR model.
-    Returns the transcription string.
-    """
     try:
-        # Load and resample the audio to 16kHz
         audio, rate = librosa.load(audio_file, sr=16000)
         input_values = processor(audio, sampling_rate=16000, return_tensors="pt").input_values
-
-        # Get model predictions and decode to text
         with torch.no_grad():
             logits = model(input_values).logits
             predicted_ids = torch.argmax(logits, dim=-1)
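The hunk above ends before the decoding step (the unchanged lines that perform it are not shown). For reference, a self-contained sketch of the same pipeline; the `processor.batch_decode` call is the standard CTC decode from `transformers` and is assumed here rather than copied from the file:

```python
import librosa
import torch
from transformers import AutoModelForCTC, Wav2Vec2Processor

MODEL_NAME = "eleferrand/xlsr53_Amis"
processor = Wav2Vec2Processor.from_pretrained(MODEL_NAME)
model = AutoModelForCTC.from_pretrained(MODEL_NAME)

def transcribe_sketch(audio_file: str) -> str:
    # Resample to the 16 kHz rate expected by the wav2vec2 checkpoint.
    audio, _rate = librosa.load(audio_file, sr=16000)
    input_values = processor(audio, sampling_rate=16000, return_tensors="pt").input_values
    with torch.no_grad():
        logits = model(input_values).logits
    predicted_ids = torch.argmax(logits, dim=-1)
    # Greedy CTC decode (assumed; the corresponding lines are outside this hunk).
    return processor.batch_decode(predicted_ids)[0]
```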
@@ -41,22 +78,12 @@ def transcribe(audio_file):
         return f"Error processing file: {e}"
 
 def transcribe_both(audio_file):
-    """
-    Transcribes the audio and returns:
-    - the original transcription (non-editable textbox),
-    - the transcription (pre-filled for the editable textbox), and
-    - the processing time (in seconds).
-    """
     start_time = datetime.now()
     transcription = transcribe(audio_file)
     processing_time = (datetime.now() - start_time).total_seconds()
     return transcription, transcription, processing_time
 
 def store_correction(original_transcription, corrected_transcription, audio_file, processing_time, age, native_speaker):
-    """
-    Stores the transcriptions and additional metadata (including user info and audio details)
-    in Firestore as a single document.
-    """
     try:
         audio_metadata = {}
         if audio_file and os.path.exists(audio_file):
@@ -70,7 +97,7 @@ def store_correction(original_transcription, corrected_transcription, audio_file
             'timestamp': datetime.now().isoformat(),
             'processing_time': processing_time,
             'audio_metadata': audio_metadata,
-            'audio_url': None,
+            'audio_url': None,
             'model_name': MODEL_NAME,
             'user_info': {
                 'native_amis_speaker': native_speaker,
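The dictionary fields above are written to Firestore further down in `store_correction`, outside this hunk. A hedged sketch of what such a write looks like with `firebase_admin`; the collection name `"corrections"` and the success message are placeholders, not taken from the Space:

```python
def store_correction_sketch(db, combined_data: dict) -> str:
    # Sketch only: .add() creates a document with an auto-generated ID in the
    # named collection; "corrections" is a placeholder collection name.
    try:
        db.collection("corrections").add(combined_data)
        return "Correction saved."
    except Exception as e:
        return f"Error saving correction: {e}"
```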
@@ -83,34 +110,18 @@ def store_correction(original_transcription, corrected_transcription, audio_file
         return f"Error saving correction: {e}"
 
 def prepare_download(audio_file, original_transcription, corrected_transcription):
-    """
-    Prepares a ZIP file containing:
-    - the uploaded audio file (as audio.wav),
-    - a text file with the original transcription, and
-    - a text file with the corrected transcription.
-    Returns the ZIP file's path.
-    """
     if audio_file is None:
         return None
-
-    # Create a temporary file to avoid filename conflicts
     tmp_zip = tempfile.NamedTemporaryFile(delete=False, suffix=".zip")
     tmp_zip.close()
     with zipfile.ZipFile(tmp_zip.name, "w") as zf:
-        # Add the audio file (renamed inside the zip)
         if os.path.exists(audio_file):
            zf.write(audio_file, arcname="audio.wav")
-        else:
-            print("Audio file not found:", audio_file)
-
-        # Write and add the original transcription
         orig_txt = "original_transcription.txt"
         with open(orig_txt, "w", encoding="utf-8") as f:
             f.write(original_transcription)
         zf.write(orig_txt, arcname="original_transcription.txt")
         os.remove(orig_txt)
-
-        # Write and add the corrected transcription
         corr_txt = "corrected_transcription.txt"
         with open(corr_txt, "w", encoding="utf-8") as f:
             f.write(corrected_transcription)
@@ -118,96 +129,117 @@ def prepare_download(audio_file, original_transcription, corrected_transcription
         os.remove(corr_txt)
     return tmp_zip.name
 
-
+def toggle_language(lang):
+    new_lang = "zh" if lang == "en" else "en"
+    lang_dict = LANGUAGE[new_lang]
+    return [
+        gr.Markdown.update(value=f"<h1 class='header'>{lang_dict['title']}</h1>"),
+        gr.Markdown.update(value=f"### {lang_dict['step1']}"),
+        gr.Audio.update(label=lang_dict['audio_input']),
+        gr.Button.update(value=lang_dict['transcribe_btn']),
+        gr.Markdown.update(value=f"### {lang_dict['step2']}"),
+        gr.Textbox.update(label=lang_dict['original_text'], placeholder=lang_dict['transcription_placeholder']),
+        gr.Textbox.update(label=lang_dict['corrected_text'], placeholder=lang_dict['transcription_placeholder']),
+        gr.Markdown.update(value=f"### {lang_dict['step3']}"),
+        gr.Slider.update(label=lang_dict['age_label']),
+        gr.Checkbox.update(label=lang_dict['native_speaker']),
+        gr.Markdown.update(value=f"### {lang_dict['step4']}"),
+        gr.Button.update(value=lang_dict['save_btn']),
+        gr.Textbox.update(label=lang_dict['save_status'], placeholder=lang_dict['status_placeholder']),
+        gr.Button.update(value=lang_dict['download_btn']),
+        gr.File.update(label=lang_dict['download_btn']),
+        gr.Button.update(value=lang_dict['toggle_lang']),
+        new_lang
+    ]
+
 with gr.Blocks(css="""
-    .container {
-        max-width: 800px;
-        margin: auto;
-        padding: 20px;
-        font-family: Arial, sans-serif;
-    }
-    .header {
-        text-align: center;
-        margin-bottom: 30px;
-    }
-    .section {
-        margin-bottom: 30px;
-        padding: 15px;
-        border: 1px solid #ddd;
-        border-radius: 8px;
-        background-color: #f9f9f9;
-    }
-    .section h3 {
-        margin-top: 0;
-        margin-bottom: 15px;
-        text-align: center;
-    }
-    .button-row {
-        display: flex;
-        justify-content: center;
-        gap: 10px;
-        flex-wrap: wrap;
-    }
-    @media (max-width: 600px) {
-        .gradio-row {
-            flex-direction: column;
-        }
-    }
+    .container { max-width: 800px; margin: auto; padding: 20px; font-family: Arial, sans-serif; }
+    .header { text-align: center; margin-bottom: 30px; }
+    .section { margin-bottom: 30px; padding: 15px; border: 1px solid #ddd; border-radius: 8px; background-color: #f9f9f9; }
+    .section h3 { margin-top: 0; margin-bottom: 15px; text-align: center; }
+    .button-row { display: flex; justify-content: center; gap: 10px; flex-wrap: wrap; }
+    .lang-toggle { position: absolute; top: 20px; right: 20px; }
+    @media (max-width: 600px) { .gradio-row { flex-direction: column; } }
 """) as demo:
+    current_lang.render()
+
     with gr.Column(elem_classes="container"):
-        gr.
+        with gr.Row():
+            title_md = gr.Markdown(elem_classes="header")
+            lang_btn = gr.Button(LANGUAGE['en']['toggle_lang'], elem_classes="lang-toggle")
 
-        # Step 1
-        with gr.
-            gr.Markdown(
-            with gr.Row(
-                audio_input = gr.Audio(sources=["upload", "microphone"], type="filepath"
-                transcribe_button = gr.Button(
+        # Step 1
+        with gr.Column(elem_classes="section"):
+            step1_md = gr.Markdown()
+            with gr.Row():
+                audio_input = gr.Audio(sources=["upload", "microphone"], type="filepath")
+                transcribe_button = gr.Button(variant="primary")
         proc_time_state = gr.State()
 
-        # Step 2
-        with gr.
-            gr.Markdown(
-            with gr.Row(
-                original_text = gr.Textbox(
-                corrected_text = gr.Textbox(
+        # Step 2
+        with gr.Column(elem_classes="section"):
+            step2_md = gr.Markdown()
+            with gr.Row():
+                original_text = gr.Textbox(interactive=False, lines=5)
+                corrected_text = gr.Textbox(interactive=True, lines=5)
 
-        # Step 3
-        with gr.
-            gr.Markdown(
-            with gr.Row(
-                age_input = gr.Slider(minimum=0, maximum=100, step=1,
-                native_speaker_input = gr.Checkbox(
+        # Step 3
+        with gr.Column(elem_classes="section"):
+            step3_md = gr.Markdown()
+            with gr.Row():
+                age_input = gr.Slider(minimum=0, maximum=100, step=1, value=25)
+                native_speaker_input = gr.Checkbox(value=True)
 
-        # Step 4
-        with gr.
-            gr.Markdown(
+        # Step 4
+        with gr.Column(elem_classes="section"):
+            step4_md = gr.Markdown()
         with gr.Row(elem_classes="button-row"):
-            save_button = gr.Button(
-            save_status = gr.Textbox(
+            save_button = gr.Button(variant="primary")
+            save_status = gr.Textbox(interactive=False)
         with gr.Row(elem_classes="button-row"):
-            download_button = gr.Button(
-            download_output = gr.File(
[... 20 deleted lines (old 191-210) not rendered in the diff view ...]
+            download_button = gr.Button()
+            download_output = gr.File()
+
+    lang_btn.click(
+        toggle_language,
+        inputs=current_lang,
+        outputs=[
+            title_md, step1_md, audio_input, transcribe_button,
+            step2_md, original_text, corrected_text, step3_md,
+            age_input, native_speaker_input, step4_md, save_button,
+            save_status, download_button, download_output, lang_btn,
+            current_lang
+        ]
+    )
+
+    transcribe_button.click(
+        transcribe_both,
+        inputs=audio_input,
+        outputs=[original_text, corrected_text, proc_time_state]
+    )
+
+    save_button.click(
+        store_correction,
+        inputs=[original_text, corrected_text, audio_input, proc_time_state, age_input, native_speaker_input],
+        outputs=save_status
+    )
+
+    download_button.click(
+        prepare_download,
+        inputs=[audio_input, original_text, corrected_text],
+        outputs=download_output
+    )
+
+    demo.load(
+        toggle_language,
+        inputs=current_lang,
+        outputs=[
+            title_md, step1_md, audio_input, transcribe_button,
+            step2_md, original_text, corrected_text, step3_md,
+            age_input, native_speaker_input, step4_md, save_button,
+            save_status, download_button, download_output, lang_btn,
+            current_lang
+        ]
+    )
 
-# Launch the demo
 demo.launch(share=True)
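One compatibility note on `toggle_language` in the last hunk: the per-component classmethods such as `gr.Markdown.update(...)` and `gr.Textbox.update(...)` exist in Gradio 3.x but were removed in Gradio 4.x. If this Space were later pinned to a 4.x runtime (an assumption; the SDK version is not shown on this page), the same pattern is usually written with plain `gr.update(...)`, for example:

```python
import gradio as gr  # LANGUAGE is the dict defined earlier in app.py

def toggle_language_v4(lang):
    # Sketch of the same toggle under Gradio 4.x, where gr.<Component>.update()
    # no longer exists; gr.update(...) carries the keyword changes instead.
    new_lang = "zh" if lang == "en" else "en"
    d = LANGUAGE[new_lang]
    return [
        gr.update(value=f"<h1 class='header'>{d['title']}</h1>"),  # title_md
        gr.update(value=f"### {d['step1']}"),                      # step1_md
        gr.update(label=d["audio_input"]),                         # audio_input
        gr.update(value=d["transcribe_btn"]),                      # transcribe_button
        # ...remaining components follow the same pattern as toggle_language...
        new_lang,                                                  # current_lang
    ]
```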