anzorq committed
Commit da952ef · verified · 1 Parent(s): 4f046d5

Update app.py

Files changed (1)
  1. app.py +25 -27
app.py CHANGED

@@ -7,8 +7,10 @@ from transformers import AutoModelForCTC, Wav2Vec2BertProcessor
 from pytube import YouTube
 from transformers import pipeline
 import re
-import numpy as np
+from pydub import AudioSegment
+from scipy.io import wavfile
 from scipy.signal import wiener
+import numpy as np
 
 # pipe = pipeline(model="anzorq/w2v-bert-2.0-kbd", device=0) # old model
 pipe = pipeline(model="anzorq/w2v-bert-2.0-kbd-v2", device=0) # new model with a new tokenizer
@@ -26,19 +28,20 @@ reverse_pattern = re.compile('|'.join(re.escape(key) for key in reverse_replacem
 def replace_symbols_back(text):
     return reverse_pattern.sub(lambda match: reverse_replacements[match.group(0)], text)
 
-def normalize_audio(audio_tensor):
-    peak = torch.max(torch.abs(audio_tensor))
-    normalized_audio = audio_tensor / peak
-    return normalized_audio
+def normalize_audio(audio_path):
+    audio = AudioSegment.from_file(audio_path, format="mp4")
+    normalized_audio = audio.normalize()
+    normalized_audio.export(audio_path, format="mp4")
 
-def apply_wiener_filter(audio_tensor):
-    audio_data = audio_tensor.numpy()
+def apply_wiener_filter(audio_path):
+    sample_rate, audio_data = wavfile.read(audio_path)
     filtered_audio = wiener(audio_data)
-    return torch.tensor(filtered_audio)
+    wavfile.write(audio_path, sample_rate, filtered_audio.astype(np.int16))
 
-def resample_audio(audio_tensor, original_sample_rate, target_sample_rate=16000):
-    resampled_audio = torchaudio.transforms.Resample(original_sample_rate, target_sample_rate)(audio_tensor)
-    return resampled_audio
+def resample_audio(audio_path, target_sample_rate=16000):
+    audio, sample_rate = torchaudio.load(audio_path)
+    resampled_audio = torchaudio.transforms.Resample(sample_rate, target_sample_rate)(audio)
+    torchaudio.save(audio_path, resampled_audio, target_sample_rate)
 
 @spaces.GPU
 def transcribe_speech(audio, progress=gr.Progress()):
@@ -54,25 +57,20 @@ def transcribe_from_youtube(url, apply_improvements, progress=gr.Progress()):
     progress(0, "Downloading YouTube audio...")
     audio_path = YouTube(url).streams.filter(only_audio=True)[0].download(filename="tmp.mp4")
 
-    try:
-        audio, original_sample_rate = torchaudio.load(audio_path)
-
-        if apply_improvements:
-            progress(0.2, "Normalizing audio...")
-            audio = normalize_audio(audio)
+    if apply_improvements:
+        progress(0.2, "Normalizing audio...")
+        normalize_audio(audio_path)
 
-            progress(0.4, "Applying Wiener filter...")
-            audio = apply_wiener_filter(audio)
+        progress(0.4, "Applying Wiener filter...")
+        apply_wiener_filter(audio_path)
 
-            progress(0.6, "Resampling audio...")
-            audio = resample_audio(audio, original_sample_rate)
+        progress(0.6, "Resampling audio...")
+        resample_audio(audio_path)
 
-        progress(0.8, "Transcribing audio...")
-        transcription = transcribe_speech(audio)
+    progress(0.8, "Transcribing audio...")
+    transcription = transcribe_speech(audio_path)
 
-    finally:
-        if os.path.exists(audio_path):
-            os.remove(audio_path)
+    os.remove(audio_path)
 
     return transcription
 
@@ -117,4 +115,4 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
     transcribe_button.click(fn=transcribe_from_youtube, inputs=[youtube_url, apply_improvements], outputs=transcription_output)
     youtube_url.change(populate_metadata, inputs=[youtube_url], outputs=[img, title])
 
-demo.launch()
+demo.launch()
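
Taken together, the commit swaps the tensor-based helpers for ones that rewrite the downloaded file in place and then hand a file path to the ASR pipeline. The snippet below is a minimal sketch of that path-based chain for illustration only: the preprocess() wrapper and the audio.wav file name are assumptions that do not appear in app.py, and it uses a WAV intermediate rather than the MP4 download used by the Space.

# Minimal sketch (not from app.py): the path-based preprocessing chain
# introduced by this commit, applied to a hypothetical local WAV file.
import numpy as np
import torchaudio
from pydub import AudioSegment
from scipy.io import wavfile
from scipy.signal import wiener

def preprocess(audio_path, target_sample_rate=16000):
    # Peak-normalize the file in place with pydub.
    AudioSegment.from_file(audio_path).normalize().export(audio_path, format="wav")

    # Denoise with scipy's Wiener filter, overwriting the same file.
    sample_rate, audio_data = wavfile.read(audio_path)
    wavfile.write(audio_path, sample_rate, wiener(audio_data).astype(np.int16))

    # Resample to the 16 kHz rate the ASR pipeline expects and save again.
    audio, sample_rate = torchaudio.load(audio_path)
    resampled = torchaudio.transforms.Resample(sample_rate, target_sample_rate)(audio)
    torchaudio.save(audio_path, resampled, target_sample_rate)
    return audio_path

# Hypothetical usage: feed the cleaned file path to the same pipeline object,
# mirroring transcribe_speech(audio_path) in app.py.
# pipe = pipeline(model="anzorq/w2v-bert-2.0-kbd-v2")
# print(pipe(preprocess("audio.wav")))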