import gradio as gr
import torch
import librosa
from transformers import Wav2Vec2Processor, AutoModelForCTC
import zipfile
import os
import firebase_admin
from firebase_admin import credentials, firestore
from datetime import datetime
import json
import tempfile
# Initialize Firebase from the `firebase_creds` environment variable, which must
# hold the service-account key as a JSON string (using os.environ[...] so a
# missing variable raises a clear KeyError instead of json.loads failing on None)
firebase_config = json.loads(os.environ["firebase_creds"])
cred = credentials.Certificate(firebase_config)  # dict parsed from the env var, not a file path
firebase_admin.initialize_app(cred)
db = firestore.client()
# Load the ASR model and processor
MODEL_NAME = "eleferrand/xlsr53_Amis"
processor = Wav2Vec2Processor.from_pretrained(MODEL_NAME)
model = AutoModelForCTC.from_pretrained(MODEL_NAME)
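
# The model is loaded once at startup and runs on CPU by default. A minimal
# sketch (an assumption, not part of the original app) for GPU inference; the
# input tensor built inside transcribe() would also need .to(device):
#   device = "cuda" if torch.cuda.is_available() else "cpu"
#   model.to(device)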
def transcribe(audio_file):
    """
    Transcribes the audio file using the loaded ASR model.
    Returns the transcription string.
    """
    try:
        # Load the audio and resample it to the 16 kHz rate the model expects
        audio, _ = librosa.load(audio_file, sr=16000)
        input_values = processor(audio, sampling_rate=16000, return_tensors="pt").input_values

        # Get model predictions and decode to text
        with torch.no_grad():
            logits = model(input_values).logits
        predicted_ids = torch.argmax(logits, dim=-1)
        transcription = processor.batch_decode(predicted_ids)[0]
        return transcription.replace("[UNK]", "")  # drop unknown-token markers
    except Exception as e:
        return f"Error processing file: {e}"
def transcribe_both(audio_file):
    """
    Transcribes the audio and returns:
      - the original transcription (non-editable textbox),
      - the transcription again (pre-filled for the editable textbox), and
      - the processing time (in seconds).
    """
    start_time = datetime.now()
    transcription = transcribe(audio_file)
    processing_time = (datetime.now() - start_time).total_seconds()
    return transcription, transcription, processing_time
def store_correction(original_transcription, corrected_transcription, audio_file, processing_time, age, native_speaker):
    """
    Stores the transcriptions and additional metadata (including user info and
    audio details) in Firestore as a single document.
    """
    try:
        audio_metadata = {}
        if audio_file and os.path.exists(audio_file):
            audio, sr = librosa.load(audio_file, sr=16000)
            duration = librosa.get_duration(y=audio, sr=sr)
            file_size = os.path.getsize(audio_file)
            audio_metadata = {'duration': duration, 'file_size': file_size}
        combined_data = {
            'original_text': original_transcription,
            'corrected_text': corrected_transcription,
            'timestamp': datetime.now().isoformat(),
            'processing_time': processing_time,
            'audio_metadata': audio_metadata,
            'audio_url': None,  # placeholder in case a URL is stored later
            'model_name': MODEL_NAME,
            'user_info': {
                'native_amis_speaker': native_speaker,
                'age': age
            }
        }
        # add() auto-generates the document ID, so every saved correction
        # becomes its own document in the collection
        db.collection('transcriptions').add(combined_data)
        return "Correction saved successfully!"
    except Exception as e:
        return f"Error saving correction: {e}"
def prepare_download(audio_file, original_transcription, corrected_transcription):
    """
    Prepares a ZIP file containing:
      - the uploaded audio file (as audio.wav),
      - a text file with the original transcription, and
      - a text file with the corrected transcription.
    Returns the ZIP file's path.
    """
    if audio_file is None:
        return None
    # Create a uniquely named temporary ZIP so concurrent sessions don't collide
    tmp_zip = tempfile.NamedTemporaryFile(delete=False, suffix=".zip")
    tmp_zip.close()
    with zipfile.ZipFile(tmp_zip.name, "w") as zf:
        # Add the audio file (renamed inside the zip)
        if os.path.exists(audio_file):
            zf.write(audio_file, arcname="audio.wav")
        else:
            print("Audio file not found:", audio_file)
        # Write both transcriptions straight into the archive; writestr avoids
        # the fixed-name temp files on disk that could clash between sessions
        zf.writestr("original_transcription.txt", original_transcription or "")
        zf.writestr("corrected_transcription.txt", corrected_transcription or "")
    return tmp_zip.name
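
# gr.File serves whatever filepath prepare_download returns; because the ZIP is
# created with delete=False, it stays on disk until the host cleans it up.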
# Build the Gradio Blocks interface with improved styling
with gr.Blocks(css="""
    .container {
        max-width: 800px;
        margin: auto;
        padding: 20px;
        font-family: Arial, sans-serif;
    }
    .header {
        text-align: center;
        margin-bottom: 30px;
    }
    .section {
        margin-bottom: 30px;
        padding: 15px;
        border: 1px solid #ddd;
        border-radius: 8px;
        background-color: #f9f9f9;
    }
    .section h3 {
        margin-top: 0;
        margin-bottom: 15px;
        text-align: center;
    }
    .button-row {
        display: flex;
        justify-content: center;
        gap: 10px;
        flex-wrap: wrap;
    }
    @media (max-width: 600px) {
        .gradio-row {
            flex-direction: column;
        }
    }
""") as demo:
    with gr.Column(elem_classes="container"):
        gr.Markdown("<h1 class='header'>ASR Demo with Editable Transcription</h1>")

        # Step 1: Audio Upload & Transcription
        # (gr.Group replaces gr.Box, which was removed in Gradio 4.x; the
        # list-valued `sources` argument below requires Gradio 4.x)
        with gr.Group(elem_classes="section"):
            gr.Markdown("### Step 1: Audio Upload & Transcription")
            with gr.Row(elem_classes="gradio-row"):
                audio_input = gr.Audio(sources=["upload", "microphone"], type="filepath", label="Audio Input")
                transcribe_button = gr.Button("Transcribe Audio", variant="primary")
            # Hidden per-session state that carries the processing time from
            # transcribe_both to store_correction without showing it in the UI
            proc_time_state = gr.State()

        # Step 2: Review & Edit Transcription
        with gr.Group(elem_classes="section"):
            gr.Markdown("### Step 2: Review & Edit Transcription")
            with gr.Row(elem_classes="gradio-row"):
                original_text = gr.Textbox(label="Original Transcription", interactive=False, lines=5, placeholder="Transcription will appear here...")
                corrected_text = gr.Textbox(label="Corrected Transcription", interactive=True, lines=5, placeholder="Edit transcription here...")

        # Step 3: User Information
        with gr.Group(elem_classes="section"):
            gr.Markdown("### Step 3: User Information")
            with gr.Row(elem_classes="gradio-row"):
                age_input = gr.Slider(minimum=0, maximum=100, step=1, label="Age", value=25)
                native_speaker_input = gr.Checkbox(label="Native Amis Speaker", value=True)

        # Step 4: Save & Download
        with gr.Group(elem_classes="section"):
            gr.Markdown("### Step 4: Save & Download")
            with gr.Row(elem_classes="button-row"):
                save_button = gr.Button("Save Correction to Database", variant="primary")
                save_status = gr.Textbox(label="Save Status", interactive=False, placeholder="Status messages will appear here...")
            with gr.Row(elem_classes="button-row"):
                download_button = gr.Button("Download Results (ZIP)")
                download_output = gr.File(label="Download ZIP")

    # UI Actions
    transcribe_button.click(
        fn=transcribe_both,
        inputs=audio_input,
        outputs=[original_text, corrected_text, proc_time_state]
    )
    save_button.click(
        fn=store_correction,
        inputs=[original_text, corrected_text, audio_input, proc_time_state, age_input, native_speaker_input],
        outputs=save_status
    )
    download_button.click(
        fn=prepare_download,
        inputs=[audio_input, original_text, corrected_text],
        outputs=download_output
    )
# Launch the demo (share=True creates a public tunnel for local runs; Hugging
# Face Spaces ignores it and serves the app directly)
demo.launch(share=True)