import gradio as gr
import torch
import librosa
from transformers import Wav2Vec2Processor, AutoModelForCTC
import zipfile
import os
import firebase_admin
from firebase_admin import credentials, firestore, storage
from datetime import datetime, timedelta
import json
import tempfile
import uuid
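
# Gradio app: transcribe Paiwan audio with an XLSR CTC model, let users review and
# correct the transcription, and store corrections (plus the audio) in Firebase.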

# LOCAL INITIALIZATION - ONLY USE ON YOUR OWN DEVICE 
'''
os.chdir(os.path.dirname(os.path.abspath(__file__)))
cred = credentials.Certificate("serviceAccountKey.json")
'''
# Deployed Initialization
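# The 'firebase_creds' environment variable is expected to hold the service-account
# key as a JSON string (the same content as serviceAccountKey.json above).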
firebase_config = json.loads(os.environ.get('firebase_creds'))
cred = credentials.Certificate(firebase_config)

firebase_admin.initialize_app(cred, {
    "storageBucket": "amis-asr-corrections-dem-8cf3d.firebasestorage.app"
})
db = firestore.client()
bucket = storage.bucket()

# Load the ASR model and processor
MODEL_NAME = "eleferrand/XLSR_paiwan"
processor = Wav2Vec2Processor.from_pretrained(MODEL_NAME)
model = AutoModelForCTC.from_pretrained(MODEL_NAME)

def transcribe(audio_file):
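    """Load the audio at 16 kHz, run the CTC model, and return the greedy
    (argmax) decoding with [UNK] tokens stripped."""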
    try:
        audio, rate = librosa.load(audio_file, sr=16000)
        input_values = processor(audio, sampling_rate=16000, return_tensors="pt").input_values

        with torch.no_grad():
            logits = model(input_values).logits
        predicted_ids = torch.argmax(logits, dim=-1)
        transcription = processor.batch_decode(predicted_ids)[0]
        return transcription.replace("[UNK]", "")
    except Exception as e:
        return f"處理文件錯誤: {e}"

def transcribe_both(audio_file):
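    """Transcribe the audio once and return the text twice, so the read-only
    original box and the editable correction box start out identical."""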
    transcription = transcribe(audio_file)
    return transcription, transcription

def store_correction(original_transcription, corrected_transcription, audio_file, age, native_speaker):
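    """Upload the audio file to Firebase Storage (when provided) and write the
    original/corrected transcriptions, audio metadata, user info and a signed
    download URL to the 'paiwan_transcriptions' Firestore collection."""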
    try:
        audio_metadata = {}
        audio_file_url = None
        
        # If an audio file is provided, upload it to Firebase Storage
        if audio_file and os.path.exists(audio_file):
            audio, sr = librosa.load(audio_file, sr=44100)
            duration = librosa.get_duration(y=audio, sr=sr)
            file_size = os.path.getsize(audio_file)
            audio_metadata = {'duration': duration, 'file_size': file_size}
            
            # Generate a unique identifier for the audio file
            unique_id = str(uuid.uuid4())
            destination_path = f"audio/pai/{unique_id}.wav"
            
            # Create a blob and upload the file
            blob = bucket.blob(destination_path)
            blob.upload_from_filename(audio_file)
            
            # Generate a signed download URL valid for 1 hour (adjust expiration as needed)
            audio_file_url = blob.generate_signed_url(expiration=timedelta(hours=1))
        
        combined_data = {
            'transcription_info': {
                'original_text': original_transcription,
                'corrected_text': corrected_transcription,
                'language': 'pai',
            },
            'audio_data': {
                'audio_metadata': audio_metadata,
                'audio_file_url': audio_file_url,
            },
            'user_info': {
                'native_paiwan_speaker': native_speaker,
                'age': age
            },
            'timestamp': datetime.now().isoformat(),
            'model_name': MODEL_NAME
        }
        # Save the combined record to the language-specific Firestore collection
        db.collection('paiwan_transcriptions').add(combined_data)
        return "校正保存成功! (Correction saved successfully!)"
    except Exception as e:
        return f"保存失败: {e} (Error saving correction: {e})"

def prepare_download(audio_file, original_transcription, corrected_transcription):
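    """Package the audio file and both transcriptions into a temporary ZIP
    archive and return its path for the download component."""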
    if audio_file is None:
        return None

    tmp_zip = tempfile.NamedTemporaryFile(delete=False, suffix=".zip")
    tmp_zip.close()
    with zipfile.ZipFile(tmp_zip.name, "w") as zf:
        if os.path.exists(audio_file):
            zf.write(audio_file, arcname="audio.wav")
        
        orig_txt = "original_transcription.txt"
        with open(orig_txt, "w", encoding="utf-8") as f:
            f.write(original_transcription)
        zf.write(orig_txt, arcname="original_transcription.txt")
        os.remove(orig_txt)

        corr_txt = "corrected_transcription.txt"
        with open(corr_txt, "w", encoding="utf-8") as f:
            f.write(corrected_transcription)
        zf.write(corr_txt, arcname="corrected_transcription.txt")
        os.remove(corr_txt)
    return tmp_zip.name

def toggle_language(switch):
    """Switch the UI text between English and Traditional Chinese.

    Markdown and Button components take a plain string as their new value, but
    relabeling the Audio, Textbox, Slider and Checkbox components has to go
    through gr.update(label=...); a plain string would overwrite their values.
    """
    if switch:
        return (
            "排灣語自動語音識別逐字稿與修正系統",
            "步驟 1:音訊上傳與逐字稿",
            "步驟 2:審閱與編輯逐字稿",
            "步驟 3:使用者資訊",
            "步驟 4:儲存與下載",
            gr.update(label="音訊輸入"), "語音辨識",
            gr.update(label="原始逐字稿"), gr.update(label="更正逐字稿"),
            gr.update(label="年齡"), gr.update(label="母語排灣語使用者?"),
            "儲存更正", gr.update(label="儲存狀態"),
            "下載 ZIP 檔案"
        )
    else:
        return (
            "Paiwan ASR Transcription & Correction System",
            "Step 1: Audio Upload & Transcription",
            "Step 2: Review & Edit Transcription",
            "Step 3: User Information",
            "Step 4: Save & Download",
            gr.update(label="Audio Input"), "Transcribe Audio",
            gr.update(label="Original Transcription"), gr.update(label="Corrected Transcription"),
            gr.update(label="Age"), gr.update(label="Native Paiwan Speaker?"),
            "Save Correction", gr.update(label="Save Status"),
            "Download ZIP File"
        )

# Interface
with gr.Blocks() as demo:
    lang_switch = gr.Checkbox(label="切換到繁體中文 (Switch to Traditional Chinese)")
    
    title = gr.Markdown("Paiwan ASR Transcription & Correction System")
    step1 = gr.Markdown("Step 1: Audio Upload & Transcription")
    
    with gr.Row():
        audio_input = gr.Audio(sources=["upload", "microphone"], type="filepath", label="Audio Input")
    
    step2 = gr.Markdown("Step 2: Review & Edit Transcription")
    with gr.Row():
        transcribe_button = gr.Button("Transcribe Audio")
    
    original_text = gr.Textbox(label="Original Transcription", interactive=False, lines=5)
    corrected_text = gr.Textbox(label="Corrected Transcription", interactive=True, lines=5)

    step3 = gr.Markdown("Step 3: User Information")
    with gr.Row():
        age_input = gr.Slider(minimum=0, maximum=100, step=1, label="Age", value=25)
        native_speaker_input = gr.Checkbox(label="Native Paiwan Speaker?", value=True)

    step4 = gr.Markdown("Step 4: Save & Download")
    with gr.Row():
        save_button = gr.Button("Save Correction")
        save_status = gr.Textbox(label="Save Status", interactive=False)
    
    with gr.Row():
        download_button = gr.Button("Download ZIP File")
        download_output = gr.File()

    lang_switch.change(
        toggle_language, 
        inputs=lang_switch, 
        outputs=[title, step1, step2, step3, step4, audio_input, transcribe_button,
                 original_text, corrected_text, age_input, native_speaker_input,
                 save_button, save_status, download_button]
    )

    transcribe_button.click(
        transcribe_both, 
        inputs=audio_input, 
        outputs=[original_text, corrected_text]
    )

    save_button.click(
        store_correction, 
        inputs=[original_text, corrected_text, audio_input, age_input, native_speaker_input], 
        outputs=save_status
    )

    download_button.click(
        prepare_download, 
        inputs=[audio_input, original_text, corrected_text], 
        outputs=download_output
    )

demo.launch()