ovieyra21 committed
Commit b16790d · verified · 1 Parent(s): 0db8f6e

Update app.py

Files changed (1): app.py +132 -62
app.py CHANGED
@@ -1,17 +1,20 @@
  import torch
  import gradio as gr
  import yt_dlp as youtube_dl
  import numpy as np
  from datasets import Dataset, Audio
  from scipy.io import wavfile
  from transformers import pipeline
  from transformers.pipelines.audio_utils import ffmpeg_read
  import tempfile
  import os
  import time
  import demucs.api

- MODEL_NAME = "openai/whisper-large-v2"
  DEMUCS_MODEL_NAME = "htdemucs_ft"
  BATCH_SIZE = 8
  FILE_LIMIT_MB = 1000
@@ -26,28 +29,34 @@ pipe = pipeline(
      device=device,
  )

- separator = demucs.api.Separator(model=DEMUCS_MODEL_NAME)

  def separate_vocal(path):
      origin, separated = separator.separate_audio_file(path)
      demucs.api.save_audio(separated["vocals"], path, samplerate=separator.samplerate)
      return path

- def transcribe(inputs_path, task, use_demucs, dataset_name, oauth_token, progress=gr.Progress()):
      if inputs_path is None:
          raise gr.Error("No audio file submitted! Please upload or record an audio file before submitting your request.")
-     if not dataset_name:
          raise gr.Error("No dataset name submitted! Please submit a dataset name. Should be in the format : <user>/<dataset_name> or <org>/<dataset_name>. Also accepts <dataset_name>, which will default to the namespace of the logged-in user.")
-     if oauth_token is None:
-         raise gr.Error("No OAuth token submitted! Please login to use this demo.")

      total_step = 4
      current_step = 0

      current_step += 1
      progress((current_step, total_step), desc="Transcribe using Whisper.")
-     sampling_rate, inputs = wavfile.read(inputs_path)
      out = pipe(inputs_path, batch_size=BATCH_SIZE, generate_kwargs={"task": task}, return_timestamps=True)
      text = out["text"]

      current_step += 1
@@ -56,21 +65,26 @@ def transcribe(inputs_path, task, use_demucs, dataset_name, oauth_token, progres

      current_step += 1
      progress((current_step, total_step), desc="Create dataset.")
      transcripts = []
      audios = []
      with tempfile.TemporaryDirectory() as tmpdirname:
-         for i, chunk in enumerate(progress.tqdm(chunks, desc="Creating dataset (and clean audio if asked for)")):
              arr = chunk["audio"]
              path = os.path.join(tmpdirname, f"{i}.wav")
-             wavfile.write(path, sampling_rate, arr)

              if use_demucs == "separate-audio":
                  print(f"Separating vocals #{i}")
                  path = separate_vocal(path)

              audios.append(path)
              transcripts.append(chunk["text"])
-
          dataset = Dataset.from_dict({"audio": audios, "text": transcripts}).cast_column("audio", Audio())

          current_step += 1
@@ -79,6 +93,7 @@ def transcribe(inputs_path, task, use_demucs, dataset_name, oauth_token, progres

      return [[transcript] for transcript in transcripts], text

  def _return_yt_html_embed(yt_url):
      video_id = yt_url.split("?v=")[-1]
      HTML_str = (
@@ -118,18 +133,24 @@ def download_yt_audio(yt_url, filename):
      except youtube_dl.utils.ExtractorError as err:
          raise gr.Error(str(err))

- def yt_transcribe(yt_url, task, use_demucs, dataset_name, oauth_token, max_filesize=75.0, dataset_sampling_rate=24000, progress=gr.Progress()):
      if yt_url is None:
-         raise gr.Error("No YouTube link submitted! Please put a working link.")
-     if not dataset_name:
          raise gr.Error("No dataset name submitted! Please submit a dataset name. Should be in the format : <user>/<dataset_name> or <org>/<dataset_name>. Also accepts <dataset_name>, which will default to the namespace of the logged-in user.")
-     if oauth_token is None:
-         raise gr.Error("No OAuth token submitted! Please login to use this demo.")

      total_step = 5
      current_step = 0

      html_embed_str = _return_yt_html_embed(yt_url)
      current_step += 1
      progress((current_step, total_step), desc="Load video.")

@@ -137,15 +158,19 @@ def yt_transcribe(yt_url, task, use_demucs, dataset_name, oauth_token, max_files
          filepath = os.path.join(tmpdirname, "video.mp4")

          download_yt_audio(yt_url, filepath)
-     inputs = ffmpeg_read(filepath, pipe.feature_extractor.sampling_rate)
      inputs = {"array": inputs, "sampling_rate": pipe.feature_extractor.sampling_rate}

      current_step += 1
      progress((current_step, total_step), desc="Transcribe using Whisper.")
      out = pipe(inputs, batch_size=BATCH_SIZE, generate_kwargs={"task": task}, return_timestamps=True)
      text = out["text"]

-     inputs = ffmpeg_read(filepath, dataset_sampling_rate)

      current_step += 1
      progress((current_step, total_step), desc="Merge chunks.")
@@ -153,90 +178,135 @@ def yt_transcribe(yt_url, task, use_demucs, dataset_name, oauth_token, max_files

      current_step += 1
      progress((current_step, total_step), desc="Create dataset.")
      transcripts = []
      audios = []
      with tempfile.TemporaryDirectory() as tmpdirname:
-         for i, chunk in enumerate(progress.tqdm(chunks, desc="Creating dataset (and clean audio if asked for)")):
              arr = chunk["audio"]
              path = os.path.join(tmpdirname, f"{i}.wav")
-             wavfile.write(path, dataset_sampling_rate, arr)

              if use_demucs == "separate-audio":
                  print(f"Separating vocals #{i}")
                  path = separate_vocal(path)

              audios.append(path)
              transcripts.append(chunk["text"])
-
          dataset = Dataset.from_dict({"audio": audios, "text": transcripts}).cast_column("audio", Audio())

          current_step += 1
          progress((current_step, total_step), desc="Push dataset.")
          dataset.push_to_hub(dataset_name, token=oauth_token.token if oauth_token else oauth_token)

      return html_embed_str, [[transcript] for transcript in transcripts], text

- def naive_postprocess_whisper_chunks(chunks, audio_array, sampling_rate, stop_chars=".!:;?", min_duration=5):
      min_duration = int(min_duration * sampling_rate)
      new_chunks = []
      while chunks:
          current_chunk = chunks.pop(0)
          begin, end = current_chunk["timestamp"]
-         begin, end = int(begin * sampling_rate), int(end * sampling_rate)
-         current_dur = end - begin
          text = current_chunk["text"]
          chunk_to_concat = [audio_array[begin:end]]
-         while chunks and (text[-1] not in stop_chars or (current_dur < min_duration)):
              ch = chunks.pop(0)
              begin, end = ch["timestamp"]
-             begin, end = int(begin * sampling_rate), int(end * sampling_rate)
-             current_dur += end - begin
              text = "".join([text, ch["text"]])
              chunk_to_concat.append(audio_array[begin:end])
          new_chunks.append({
-             "text": text,
-             "audio": np.concatenate(chunk_to_concat)
          })
      return new_chunks

- with gr.Blocks() as demo:
-     with gr.Tab("Local file"):
          with gr.Row():
              with gr.Column():
-                 local_audio_input = gr.Audio(type="filepath", label="Upload Audio")
-                 task_input = gr.Dropdown(choices=["transcribe", "translate"], value="transcribe", label="Task")
-                 use_demucs_input = gr.Dropdown(choices=["do-nothing", "separate-audio"], value="do-nothing", label="Audio preprocessing")
-                 dataset_name_input = gr.Textbox(label="Dataset name")
-                 hf_token = gr.Textbox(label="HuggingFace Token")
-                 submit_local_button = gr.Button("Transcribe")
              with gr.Column():
-                 local_output_text = gr.Dataframe(label="Transcripts")
-                 local_output_full_text = gr.Textbox(label="Full Text")
-
-         submit_local_button.click(
-             transcribe,
-             inputs=[local_audio_input, task_input, use_demucs_input, dataset_name_input, hf_token],
-             outputs=[local_output_text, local_output_full_text],
-         )
-
-     with gr.Tab("YouTube video"):
          with gr.Row():
              with gr.Column():
-                 yt_url_input = gr.Textbox(label="YouTube URL")
-                 yt_task_input = gr.Dropdown(choices=["transcribe", "translate"], value="transcribe", label="Task")
-                 yt_use_demucs_input = gr.Dropdown(choices=["do-nothing", "separate-audio"], value="do-nothing", label="Audio preprocessing")
-                 yt_dataset_name_input = gr.Textbox(label="Dataset name")
-                 yt_hf_token = gr.Textbox(label="HuggingFace Token")
-                 submit_yt_button = gr.Button("Transcribe")
              with gr.Column():
-                 yt_html_embed_str = gr.HTML()
-                 yt_output_text = gr.Dataframe(label="Transcripts")
-                 yt_output_full_text = gr.Textbox(label="Full Text")

-         submit_yt_button.click(
-             yt_transcribe,
-             inputs=[yt_url_input, yt_task_input, yt_use_demucs_input, yt_dataset_name_input, yt_hf_token],
-             outputs=[yt_html_embed_str, yt_output_text, yt_output_full_text],
-         )
-
- demo.launch(share=True)

  import torch
+
  import gradio as gr
  import yt_dlp as youtube_dl
  import numpy as np
  from datasets import Dataset, Audio
  from scipy.io import wavfile
+
  from transformers import pipeline
  from transformers.pipelines.audio_utils import ffmpeg_read
+
  import tempfile
  import os
  import time
  import demucs.api

+ MODEL_NAME = "openai/whisper-large-v3" # "patrickvonplaten/wav2vec2-large-960h-lv60-self-4-gram" #
  DEMUCS_MODEL_NAME = "htdemucs_ft"
  BATCH_SIZE = 8
  FILE_LIMIT_MB = 1000
 
      device=device,
  )

+ separator = demucs.api.Separator(model=DEMUCS_MODEL_NAME)

  def separate_vocal(path):
      origin, separated = separator.separate_audio_file(path)
      demucs.api.save_audio(separated["vocals"], path, samplerate=separator.samplerate)
      return path

+
+ def transcribe(inputs_path, task, use_demucs, dataset_name, oauth_token: gr.OAuthToken | None, progress=gr.Progress()):
      if inputs_path is None:
          raise gr.Error("No audio file submitted! Please upload or record an audio file before submitting your request.")
+     if dataset_name is None:
          raise gr.Error("No dataset name submitted! Please submit a dataset name. Should be in the format : <user>/<dataset_name> or <org>/<dataset_name>. Also accepts <dataset_name>, which will default to the namespace of the logged-in user.")

+     if oauth_token is None:
+         gr.Warning("Make sure to click and login before using this demo.")
+         return [["transcripts will appear here"]], ""
+
      total_step = 4
      current_step = 0

      current_step += 1
      progress((current_step, total_step), desc="Transcribe using Whisper.")
+
+     sampling_rate, inputs = wavfile.read(inputs_path)
+
      out = pipe(inputs_path, batch_size=BATCH_SIZE, generate_kwargs={"task": task}, return_timestamps=True)
+
      text = out["text"]

      current_step += 1
 

      current_step += 1
      progress((current_step, total_step), desc="Create dataset.")
+
+
      transcripts = []
      audios = []
      with tempfile.TemporaryDirectory() as tmpdirname:
+         for i, chunk in enumerate(progress.tqdm(chunks, desc="Creating dataset (and clean audio if asked for)")):
+
+             # TODO: make sure 1D or 2D?
              arr = chunk["audio"]
              path = os.path.join(tmpdirname, f"{i}.wav")
+             wavfile.write(path, sampling_rate, arr)

              if use_demucs == "separate-audio":
+                 # use demucs to separate vocals
                  print(f"Separating vocals #{i}")
                  path = separate_vocal(path)

              audios.append(path)
              transcripts.append(chunk["text"])
+
          dataset = Dataset.from_dict({"audio": audios, "text": transcripts}).cast_column("audio", Audio())

          current_step += 1

      return [[transcript] for transcript in transcripts], text

+
  def _return_yt_html_embed(yt_url):
      video_id = yt_url.split("?v=")[-1]
      HTML_str = (
 
      except youtube_dl.utils.ExtractorError as err:
          raise gr.Error(str(err))

+
+ def yt_transcribe(yt_url, task, use_demucs, dataset_name, oauth_token: gr.OAuthToken | None, max_filesize=75.0, dataset_sampling_rate=24000,
+                   progress=gr.Progress()):
+
      if yt_url is None:
+         raise gr.Error("No YouTube link submitted! Please put a working link.")
+     if dataset_name is None:
          raise gr.Error("No dataset name submitted! Please submit a dataset name. Should be in the format : <user>/<dataset_name> or <org>/<dataset_name>. Also accepts <dataset_name>, which will default to the namespace of the logged-in user.")

      total_step = 5
      current_step = 0

      html_embed_str = _return_yt_html_embed(yt_url)
+
+     if oauth_token is None:
+         gr.Warning("Make sure to click and login before using this demo.")
+         return html_embed_str, [["transcripts will appear here"]], ""
+
      current_step += 1
      progress((current_step, total_step), desc="Load video.")

 
          filepath = os.path.join(tmpdirname, "video.mp4")

          download_yt_audio(yt_url, filepath)
+         with open(filepath, "rb") as f:
+             inputs_path = f.read()
+
+     inputs = ffmpeg_read(inputs_path, pipe.feature_extractor.sampling_rate)
      inputs = {"array": inputs, "sampling_rate": pipe.feature_extractor.sampling_rate}

      current_step += 1
      progress((current_step, total_step), desc="Transcribe using Whisper.")
      out = pipe(inputs, batch_size=BATCH_SIZE, generate_kwargs={"task": task}, return_timestamps=True)
+
      text = out["text"]

+     inputs = ffmpeg_read(inputs_path, dataset_sampling_rate)

      current_step += 1
      progress((current_step, total_step), desc="Merge chunks.")
 

      current_step += 1
      progress((current_step, total_step), desc="Create dataset.")
+
      transcripts = []
      audios = []
      with tempfile.TemporaryDirectory() as tmpdirname:
+         for i, chunk in enumerate(progress.tqdm(chunks, desc="Creating dataset (and clean audio if asked for).")):
+
+             # TODO: make sure 1D or 2D?
              arr = chunk["audio"]
              path = os.path.join(tmpdirname, f"{i}.wav")
+             wavfile.write(path, dataset_sampling_rate, arr)

              if use_demucs == "separate-audio":
+                 # use demucs to separate vocals
                  print(f"Separating vocals #{i}")
                  path = separate_vocal(path)

              audios.append(path)
              transcripts.append(chunk["text"])
+
          dataset = Dataset.from_dict({"audio": audios, "text": transcripts}).cast_column("audio", Audio())

          current_step += 1
          progress((current_step, total_step), desc="Push dataset.")
          dataset.push_to_hub(dataset_name, token=oauth_token.token if oauth_token else oauth_token)

+
      return html_embed_str, [[transcript] for transcript in transcripts], text

+
+ def naive_postprocess_whisper_chunks(chunks, audio_array, sampling_rate, stop_chars=".!:;?", min_duration=5):
+     # merge chunks as long as the merged audio duration is lower than min_duration and a stop character is not met
+     # return a list of dictionaries (text, audio)
+     # min_duration is in seconds
      min_duration = int(min_duration * sampling_rate)
+
+
      new_chunks = []
      while chunks:
          current_chunk = chunks.pop(0)
+
          begin, end = current_chunk["timestamp"]
+         begin, end = int(begin * sampling_rate), int(end * sampling_rate)
+
+         current_dur = end - begin
+
          text = current_chunk["text"]
+
+
          chunk_to_concat = [audio_array[begin:end]]
+         while chunks and (text[-1] not in stop_chars or (current_dur < min_duration)):
              ch = chunks.pop(0)
              begin, end = ch["timestamp"]
+             begin, end = int(begin * sampling_rate), int(end * sampling_rate)
+             current_dur += end - begin
+
              text = "".join([text, ch["text"]])
+
+             # TODO: add silence ?
              chunk_to_concat.append(audio_array[begin:end])
+
+
          new_chunks.append({
+             "text": text.strip(),
+             "audio": np.concatenate(chunk_to_concat),
          })
+         print(f"LENGTH CHUNK #{len(new_chunks)}: {current_dur/sampling_rate}s")
+
      return new_chunks

+ css = """
+ #intro{
+     max-width: 100%;
+     text-align: center;
+     margin: 0 auto;
+ }
+ """
+ with gr.Blocks(css=css) as demo:
+     with gr.Row():
+         gr.LoginButton()
+         gr.LogoutButton()
+
+     with gr.Tab("YouTube"):
+         gr.Markdown("Create your own TTS dataset using YouTube", elem_id="intro")
+         gr.Markdown(
+             "This demo allows you to create a text-to-speech dataset from an input audio snippet and push it to the hub to keep track of it."
+             f" The demo uses the checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to automatically transcribe audio files"
+             " of arbitrary length. It then merges chunks of audio and pushes them to the hub."
+         )
          with gr.Row():
              with gr.Column():
+                 audio_youtube = gr.Textbox(lines=1, placeholder="Paste the URL to a YouTube video here", label="YouTube URL")
+                 task_youtube = gr.Radio(["transcribe", "translate"], label="Task", value="transcribe")
+                 cleaning_youtube = gr.Radio(["no-post-processing", "separate-audio"], label="Audio separation and cleaning (takes longer - use it if your samples are not cleaned (background noise and music))", value="separate-audio")
+                 textbox_youtube = gr.Textbox(lines=1, placeholder="Place your new dataset name here. Should be in the format : <user>/<dataset_name> or <org>/<dataset_name>. Also accepts <dataset_name>, which will default to the namespace of the logged-in user.", label="Dataset name")
+
+                 with gr.Row():
+                     clear_youtube = gr.ClearButton([audio_youtube, task_youtube, cleaning_youtube, textbox_youtube])
+                     submit_youtube = gr.Button("Submit")
+
              with gr.Column():
+                 html_youtube = gr.HTML()
+                 dataset_youtube = gr.Dataset(label="Transcribed samples.", components=["text"], headers=["Transcripts"], samples=[["transcripts will appear here"]])
+                 transcript_youtube = gr.Textbox(label="Transcription")
+
+     with gr.Tab("Microphone or Audio file"):
+         gr.Markdown("Create your own TTS dataset using your own recordings", elem_id="intro")
+         gr.Markdown(
+             "This demo allows you to create a text-to-speech dataset from an input audio snippet and push it to the hub to keep track of it."
+             f" The demo uses the checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to automatically transcribe audio files"
+             " of arbitrary length. It then merges chunks of audio and pushes them to the hub."
+         )
          with gr.Row():
              with gr.Column():
+                 audio_file = gr.Audio(type="filepath")
+                 task_file = gr.Radio(["transcribe", "translate"], label="Task", value="transcribe")
+                 cleaning_file = gr.Radio(["no-post-processing", "separate-audio"], label="Audio separation and cleaning (takes longer - use it if your samples are not cleaned (background noise and music))", value="separate-audio")
+                 textbox_file = gr.Textbox(lines=1, placeholder="Place your new dataset name here. Should be in the format : <user>/<dataset_name> or <org>/<dataset_name>. Also accepts <dataset_name>, which will default to the namespace of the logged-in user.", label="Dataset name")
+
+                 with gr.Row():
+                     clear_file = gr.ClearButton([audio_file, task_file, cleaning_file, textbox_file])
+                     submit_file = gr.Button("Submit")
+
              with gr.Column():
+                 dataset_file = gr.Dataset(label="Transcribed samples.", components=["text"], headers=["Transcripts"], samples=[["transcripts will appear here"]])
+                 transcript_file = gr.Textbox(label="Transcription")
+
+
+     submit_file.click(transcribe, inputs=[audio_file, task_file, cleaning_file, textbox_file], outputs=[dataset_file, transcript_file])
+     submit_youtube.click(yt_transcribe, inputs=[audio_youtube, task_youtube, cleaning_youtube, textbox_youtube], outputs=[html_youtube, dataset_youtube, transcript_youtube])
+
+ demo.launch(debug=True)
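
The new naive_postprocess_whisper_chunks helper is self-contained, so it can be sanity-checked outside the Space. A minimal sketch, assuming the function from the new app.py is in scope; the dummy chunk dicts below only mimic the shape of pipe(..., return_timestamps=True)["chunks"] and are not taken from the Space:

    import numpy as np

    sampling_rate = 24000
    audio_array = np.zeros(10 * sampling_rate, dtype=np.float32)  # 10 s of silent dummy audio

    # timestamped chunks in the shape the Transformers ASR pipeline returns with return_timestamps=True
    chunks = [
        {"timestamp": (0.0, 2.0), "text": " Hello"},
        {"timestamp": (2.0, 4.0), "text": " world."},
        {"timestamp": (4.0, 9.0), "text": " A second, longer sentence."},
    ]

    merged = naive_postprocess_whisper_chunks(chunks, audio_array, sampling_rate)
    for m in merged:
        print(repr(m["text"]), m["audio"].shape)

With the default min_duration of 5 seconds, the three dummy chunks above merge into a single ~9 s sample, since merging only stops once a stop character is reached and the accumulated duration exceeds the minimum.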
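As a usage note, the dataset pushed by dataset.push_to_hub can be pulled back with the standard datasets API; the repo id below is a hypothetical placeholder for whatever was typed into the "Dataset name" textbox:

    from datasets import load_dataset, Audio

    ds = load_dataset("your-username/your-tts-dataset", split="train")  # hypothetical repo id
    ds = ds.cast_column("audio", Audio(sampling_rate=24000))  # decode/resample audio on access
    print(ds[0]["text"], ds[0]["audio"]["array"].shape)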