semakoc hunterschep committed on
Commit
a090817
·
verified ·
1 Parent(s): 2a0048a

update to include some new database fields (#3)

Browse files

- update to include some new database fields (1300ad6784798fb74d479e8fbb272a79d3dccb8a)


Co-authored-by: Hunter S <[email protected]>

Files changed (1) hide show
  1. app.py +91 -46
app.py CHANGED
@@ -8,6 +8,7 @@ import firebase_admin
8
  from firebase_admin import credentials, firestore
9
  from datetime import datetime
10
  import json
 
11
 
12
  # Initialize Firebase
13
  firebase_config = json.loads(os.environ.get('firebase_creds'))
@@ -28,10 +29,9 @@ def transcribe(audio_file):
28
  try:
29
  # Load and resample the audio to 16kHz
30
  audio, rate = librosa.load(audio_file, sr=16000)
31
- # Prepare the input tensor for the model
32
  input_values = processor(audio, sampling_rate=16000, return_tensors="pt").input_values
33
 
34
- # Get model predictions (logits) and decode to text
35
  with torch.no_grad():
36
  logits = model(input_values).logits
37
  predicted_ids = torch.argmax(logits, dim=-1)
@@ -43,7 +43,7 @@ def transcribe(audio_file):
43
  def transcribe_both(audio_file):
44
  """
45
  Transcribes the audio and returns:
46
- - the original transcription (for the non-editable textbox),
47
  - the transcription (pre-filled for the editable textbox), and
48
  - the processing time (in seconds).
49
  """
@@ -52,38 +52,32 @@ def transcribe_both(audio_file):
52
  processing_time = (datetime.now() - start_time).total_seconds()
53
  return transcription, transcription, processing_time
54
 
55
- def store_correction(original_transcription, corrected_transcription, audio_file, processing_time):
56
  """
57
- Stores the transcriptions and additional metadata in Firestore.
58
- Saves:
59
- - original & corrected text,
60
- - timestamp,
61
- - processing time,
62
- - audio metadata (duration & file size, if available),
63
- - a placeholder for the audio URL, and
64
- - the model name.
65
  """
66
  try:
67
  audio_metadata = {}
68
  if audio_file and os.path.exists(audio_file):
69
- # Load audio for metadata calculations
70
  audio, sr = librosa.load(audio_file, sr=16000)
71
  duration = librosa.get_duration(y=audio, sr=sr)
72
  file_size = os.path.getsize(audio_file)
73
- audio_metadata = {
74
- 'duration': duration,
75
- 'file_size': file_size
76
- }
77
- correction_data = {
78
  'original_text': original_transcription,
79
  'corrected_text': corrected_transcription,
80
  'timestamp': datetime.now().isoformat(),
81
  'processing_time': processing_time,
82
  'audio_metadata': audio_metadata,
83
- 'audio_url': None,
84
- 'model_name': MODEL_NAME
 
 
 
 
85
  }
86
- db.collection('transcription_corrections').add(correction_data)
87
  return "Correction saved successfully!"
88
  except Exception as e:
89
  return f"Error saving correction: {e}"
@@ -91,7 +85,7 @@ def store_correction(original_transcription, corrected_transcription, audio_file
91
  def prepare_download(audio_file, original_transcription, corrected_transcription):
92
  """
93
  Prepares a ZIP file containing:
94
- - The uploaded audio file (as audio.wav),
95
  - a text file with the original transcription, and
96
  - a text file with the corrected transcription.
97
  Returns the ZIP file's path.
@@ -99,53 +93,103 @@ def prepare_download(audio_file, original_transcription, corrected_transcription
99
  if audio_file is None:
100
  return None
101
 
102
- zip_filename = "results.zip"
103
- with zipfile.ZipFile(zip_filename, "w") as zf:
 
 
104
  # Add the audio file (renamed inside the zip)
105
  if os.path.exists(audio_file):
106
  zf.write(audio_file, arcname="audio.wav")
107
  else:
108
  print("Audio file not found:", audio_file)
109
 
110
- # Add the original transcription as a text file
111
  orig_txt = "original_transcription.txt"
112
  with open(orig_txt, "w", encoding="utf-8") as f:
113
  f.write(original_transcription)
114
  zf.write(orig_txt, arcname="original_transcription.txt")
115
  os.remove(orig_txt)
116
 
117
- # Add the corrected transcription as a text file
118
  corr_txt = "corrected_transcription.txt"
119
  with open(corr_txt, "w", encoding="utf-8") as f:
120
  f.write(corrected_transcription)
121
  zf.write(corr_txt, arcname="corrected_transcription.txt")
122
  os.remove(corr_txt)
123
- return zip_filename
124
 
125
  # Build the Gradio Blocks interface with improved styling
126
  with gr.Blocks(css="""
127
- .container { max-width: 800px; margin: auto; }
128
- .title { text-align: center; }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
129
  """) as demo:
130
  with gr.Column(elem_classes="container"):
131
- gr.Markdown("<h1 class='title'>ASR Demo with Editable Transcription</h1>")
132
- with gr.Row():
133
- audio_input = gr.Audio(sources=["upload", "microphone"], type="filepath", label="Upload or Record Audio")
134
- transcribe_button = gr.Button("Transcribe Audio", variant="primary")
135
- with gr.Row():
136
- original_text = gr.Textbox(label="Original Transcription", interactive=False, lines=5)
137
- corrected_text = gr.Textbox(label="Corrected Transcription", interactive=True, lines=5)
138
- # Hidden state to hold processing time
139
- proc_time_state = gr.State()
140
- with gr.Row():
141
- save_button = gr.Button("Save Correction to Database", variant="primary")
142
- save_status = gr.Textbox(label="Save Status", interactive=False)
143
- with gr.Accordion("Download Options", open=False):
144
- with gr.Row():
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
145
  download_button = gr.Button("Download Results (ZIP)")
146
  download_output = gr.File(label="Download ZIP")
147
 
148
- # Set up actions
149
  transcribe_button.click(
150
  fn=transcribe_both,
151
  inputs=audio_input,
@@ -154,7 +198,7 @@ with gr.Blocks(css="""
154
 
155
  save_button.click(
156
  fn=store_correction,
157
- inputs=[original_text, corrected_text, audio_input, proc_time_state],
158
  outputs=save_status
159
  )
160
 
@@ -164,5 +208,6 @@ with gr.Blocks(css="""
164
  outputs=download_output
165
  )
166
 
 
167
  # Launch the demo
168
- demo.launch(share=True)
 
8
  from firebase_admin import credentials, firestore
9
  from datetime import datetime
10
  import json
11
+ import tempfile
12
 
13
  # Initialize Firebase
14
  firebase_config = json.loads(os.environ.get('firebase_creds'))
 
29
  try:
30
  # Load and resample the audio to 16kHz
31
  audio, rate = librosa.load(audio_file, sr=16000)
 
32
  input_values = processor(audio, sampling_rate=16000, return_tensors="pt").input_values
33
 
34
+ # Get model predictions and decode to text
35
  with torch.no_grad():
36
  logits = model(input_values).logits
37
  predicted_ids = torch.argmax(logits, dim=-1)
 
43
  def transcribe_both(audio_file):
44
  """
45
  Transcribes the audio and returns:
46
+ - the original transcription (non-editable textbox),
47
  - the transcription (pre-filled for the editable textbox), and
48
  - the processing time (in seconds).
49
  """
 
52
  processing_time = (datetime.now() - start_time).total_seconds()
53
  return transcription, transcription, processing_time
54
 
55
def store_correction(original_transcription, corrected_transcription, audio_file, processing_time, age, native_speaker):
    """
    Persist a transcription correction to Firestore as a single document.

    The document bundles the original and corrected text, an ISO-format
    timestamp, the transcription processing time, best-effort audio metadata
    (duration and file size, when the audio file is still readable), a
    placeholder audio URL, the model name, and the user-supplied info
    (age and native-speaker flag).

    Returns a human-readable status string; failures are reported through
    the return value rather than raised, so the UI save button never crashes.
    """
    try:
        # Audio metadata is best-effort: left empty when the file is gone.
        audio_metadata = {}
        if audio_file and os.path.exists(audio_file):
            waveform, sample_rate = librosa.load(audio_file, sr=16000)
            audio_metadata = {
                'duration': librosa.get_duration(y=waveform, sr=sample_rate),
                'file_size': os.path.getsize(audio_file),
            }
        combined_data = {
            'original_text': original_transcription,
            'corrected_text': corrected_transcription,
            'timestamp': datetime.now().isoformat(),
            'processing_time': processing_time,
            'audio_metadata': audio_metadata,
            'audio_url': None,  # placeholder for a future storage URL
            'model_name': MODEL_NAME,
            'user_info': {
                'native_amis_speaker': native_speaker,
                'age': age
            }
        }
        db.collection('transcriptions').add(combined_data)
        return "Correction saved successfully!"
    except Exception as e:
        return f"Error saving correction: {e}"
 
85
def prepare_download(audio_file, original_transcription, corrected_transcription):
    """
    Bundle the audio and both transcriptions into a downloadable ZIP.

    Archive contents:
    - audio.wav: the uploaded audio file (when it still exists on disk),
    - original_transcription.txt: the model's transcription,
    - corrected_transcription.txt: the user-edited transcription.

    Returns the path of the created ZIP file, or None when no audio file
    was provided.
    """
    if audio_file is None:
        return None

    # A named temp file avoids ZIP-filename collisions between concurrent users.
    tmp_zip = tempfile.NamedTemporaryFile(delete=False, suffix=".zip")
    tmp_zip.close()
    with zipfile.ZipFile(tmp_zip.name, "w") as zf:
        # Add the audio file (renamed inside the zip).
        if os.path.exists(audio_file):
            zf.write(audio_file, arcname="audio.wav")
        else:
            print("Audio file not found:", audio_file)

        # Write the transcriptions straight into the archive. The previous
        # implementation created fixed-name .txt files in the CWD and deleted
        # them afterwards, which races under concurrent requests and leaks
        # files if an error occurs before the os.remove(); writestr needs no
        # intermediate files (str payloads are UTF-8 encoded, as before).
        zf.writestr("original_transcription.txt", original_transcription)
        zf.writestr("corrected_transcription.txt", corrected_transcription)
    return tmp_zip.name
120
 
121
  # Build the Gradio Blocks interface with improved styling
122
  with gr.Blocks(css="""
123
+ .container {
124
+ max-width: 800px;
125
+ margin: auto;
126
+ padding: 20px;
127
+ font-family: Arial, sans-serif;
128
+ }
129
+ .header {
130
+ text-align: center;
131
+ margin-bottom: 30px;
132
+ }
133
+ .section {
134
+ margin-bottom: 30px;
135
+ padding: 15px;
136
+ border: 1px solid #ddd;
137
+ border-radius: 8px;
138
+ background-color: #f9f9f9;
139
+ }
140
+ .section h3 {
141
+ margin-top: 0;
142
+ margin-bottom: 15px;
143
+ text-align: center;
144
+ }
145
+ .button-row {
146
+ display: flex;
147
+ justify-content: center;
148
+ gap: 10px;
149
+ flex-wrap: wrap;
150
+ }
151
+ @media (max-width: 600px) {
152
+ .gradio-row {
153
+ flex-direction: column;
154
+ }
155
+ }
156
  """) as demo:
157
  with gr.Column(elem_classes="container"):
158
+ gr.Markdown("<h1 class='header'>ASR Demo with Editable Transcription</h1>")
159
+
160
+ # Step 1: Audio Upload & Transcription
161
+ with gr.Box(elem_classes="section"):
162
+ gr.Markdown("### Step 1: Audio Upload & Transcription")
163
+ with gr.Row(elem_classes="gradio-row"):
164
+ audio_input = gr.Audio(sources=["upload", "microphone"], type="filepath", label="Audio Input")
165
+ transcribe_button = gr.Button("Transcribe Audio", variant="primary")
166
+ proc_time_state = gr.State()
167
+
168
+ # Step 2: Review & Edit Transcription
169
+ with gr.Box(elem_classes="section"):
170
+ gr.Markdown("### Step 2: Review & Edit Transcription")
171
+ with gr.Row(elem_classes="gradio-row"):
172
+ original_text = gr.Textbox(label="Original Transcription", interactive=False, lines=5, placeholder="Transcription will appear here...")
173
+ corrected_text = gr.Textbox(label="Corrected Transcription", interactive=True, lines=5, placeholder="Edit transcription here...")
174
+
175
+ # Step 3: User Information
176
+ with gr.Box(elem_classes="section"):
177
+ gr.Markdown("### Step 3: User Information")
178
+ with gr.Row(elem_classes="gradio-row"):
179
+ age_input = gr.Slider(minimum=0, maximum=100, step=1, label="Age", value=25)
180
+ native_speaker_input = gr.Checkbox(label="Native Amis Speaker", value=True)
181
+
182
+ # Step 4: Save & Download
183
+ with gr.Box(elem_classes="section"):
184
+ gr.Markdown("### Step 4: Save & Download")
185
+ with gr.Row(elem_classes="button-row"):
186
+ save_button = gr.Button("Save Correction to Database", variant="primary")
187
+ save_status = gr.Textbox(label="Save Status", interactive=False, placeholder="Status messages will appear here...")
188
+ with gr.Row(elem_classes="button-row"):
189
  download_button = gr.Button("Download Results (ZIP)")
190
  download_output = gr.File(label="Download ZIP")
191
 
192
+ # UI Actions
193
  transcribe_button.click(
194
  fn=transcribe_both,
195
  inputs=audio_input,
 
198
 
199
  save_button.click(
200
  fn=store_correction,
201
+ inputs=[original_text, corrected_text, audio_input, proc_time_state, age_input, native_speaker_input],
202
  outputs=save_status
203
  )
204
 
 
208
  outputs=download_output
209
  )
210
 
211
+
212
  # Launch the demo
213
+ demo.launch(share=True)