Futuresony committed · verified
Commit 60800d1 · 1 Parent(s): c10a870

Update app.py

Files changed (1)
  1. app.py +144 -26
app.py CHANGED
@@ -1,28 +1,146 @@
- import gradio as gr
  import torch
- import torchaudio
- from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline
-
- # Load MMS ASR model
- MODEL_NAME = "facebook/mms-1b-all"
- device = "cuda" if torch.cuda.is_available() else "cpu"
-
- processor = AutoProcessor.from_pretrained(MODEL_NAME)
- model = AutoModelForSpeechSeq2Seq.from_pretrained(MODEL_NAME).to(device)
- asr_pipeline = pipeline("automatic-speech-recognition", model=model, processor=processor, torch_dtype=torch.float16, device=0 if device == "cuda" else -1)
-
- # Speech-to-text function
- def transcribe(audio):
-     waveform, sr = torchaudio.load(audio)
-     waveform = torchaudio.transforms.Resample(sr, 16000)(waveform)  # Ensure 16kHz sample rate
-     text = asr_pipeline({"array": waveform.squeeze().numpy(), "sampling_rate": 16000})["text"]
-     return text
-
- # Gradio UI
- gr.Interface(
      fn=transcribe,
-     inputs=gr.Audio(source="microphone", type="filepath"),
-     outputs=gr.Text(label="Transcription"),
-     title="Real-time Speech-to-Text",
-     description="Speak into your microphone and see the transcribed text.",
- ).launch()
+ import spaces
  import torch
+
+ import gradio as gr
+ import yt_dlp as youtube_dl
+ from transformers import pipeline
+ from transformers.pipelines.audio_utils import ffmpeg_read
+
+ import tempfile
+ import time  # needed for the time.strftime/time.gmtime calls in download_yt_audio
+ import os
+
+ MODEL_NAME = "openai/whisper-large-v3-turbo"
+ BATCH_SIZE = 8
+ FILE_LIMIT_MB = 1000
+ YT_LENGTH_LIMIT_S = 3600  # limit to 1 hour YouTube files
+
+ device = 0 if torch.cuda.is_available() else "cpu"
+
+ pipe = pipeline(
+     task="automatic-speech-recognition",
+     model=MODEL_NAME,
+     chunk_length_s=30,
+     device=device,
+ )
+
+
+ @spaces.GPU
+ def transcribe(inputs, task):
+     if inputs is None:
+         raise gr.Error("No audio file submitted! Please upload or record an audio file before submitting your request.")
+
+     text = pipe(inputs, batch_size=BATCH_SIZE, generate_kwargs={"task": task}, return_timestamps=True)["text"]
+     return text
+
+
+ def _return_yt_html_embed(yt_url):
+     video_id = yt_url.split("?v=")[-1]
+     HTML_str = (
+         f'<center> <iframe width="500" height="320" src="https://www.youtube.com/embed/{video_id}"> </iframe>'
+         " </center>"
+     )
+     return HTML_str
+
+ def download_yt_audio(yt_url, filename):
+     info_loader = youtube_dl.YoutubeDL()
+
+     try:
+         info = info_loader.extract_info(yt_url, download=False)
+     except youtube_dl.utils.DownloadError as err:
+         raise gr.Error(str(err))
+
+     file_length = info["duration_string"]
+     file_h_m_s = file_length.split(":")
+     file_h_m_s = [int(sub_length) for sub_length in file_h_m_s]
+
+     if len(file_h_m_s) == 1:
+         file_h_m_s.insert(0, 0)
+     if len(file_h_m_s) == 2:
+         file_h_m_s.insert(0, 0)
+     file_length_s = file_h_m_s[0] * 3600 + file_h_m_s[1] * 60 + file_h_m_s[2]
+
+     if file_length_s > YT_LENGTH_LIMIT_S:
+         yt_length_limit_hms = time.strftime("%HH:%MM:%SS", time.gmtime(YT_LENGTH_LIMIT_S))
+         file_length_hms = time.strftime("%HH:%MM:%SS", time.gmtime(file_length_s))
+         raise gr.Error(f"Maximum YouTube length is {yt_length_limit_hms}, got {file_length_hms} YouTube video.")
+
+     ydl_opts = {"outtmpl": filename, "format": "worstvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best"}
+
+     with youtube_dl.YoutubeDL(ydl_opts) as ydl:
+         try:
+             ydl.download([yt_url])
+         except youtube_dl.utils.ExtractorError as err:
+             raise gr.Error(str(err))
+
+ @spaces.GPU
+ def yt_transcribe(yt_url, task, max_filesize=75.0):
+     html_embed_str = _return_yt_html_embed(yt_url)
+
+     with tempfile.TemporaryDirectory() as tmpdirname:
+         filepath = os.path.join(tmpdirname, "video.mp4")
+         download_yt_audio(yt_url, filepath)
+         with open(filepath, "rb") as f:
+             inputs = f.read()
+
+     inputs = ffmpeg_read(inputs, pipe.feature_extractor.sampling_rate)
+     inputs = {"array": inputs, "sampling_rate": pipe.feature_extractor.sampling_rate}
+
+     text = pipe(inputs, batch_size=BATCH_SIZE, generate_kwargs={"task": task}, return_timestamps=True)["text"]
+
+     return html_embed_str, text
+
+
+ demo = gr.Blocks(theme=gr.themes.Ocean())
+
+ mf_transcribe = gr.Interface(
+     fn=transcribe,
+     inputs=[
+         gr.Audio(sources="microphone", type="filepath"),
+         gr.Radio(["transcribe", "translate"], label="Task", value="transcribe"),
+     ],
+     outputs="text",
+     title="Whisper Large V3 Turbo: Transcribe Audio",
+     description=(
+         "Transcribe long-form microphone or audio inputs with the click of a button! Demo uses the"
+         f" checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe audio files"
+         " of arbitrary length."
+     ),
+     allow_flagging="never",
+ )
+
+ file_transcribe = gr.Interface(
      fn=transcribe,
+     inputs=[
+         gr.Audio(sources="upload", type="filepath", label="Audio file"),
+         gr.Radio(["transcribe", "translate"], label="Task", value="transcribe"),
+     ],
+     outputs="text",
+     title="Whisper Large V3: Transcribe Audio",
+     description=(
+         "Transcribe long-form microphone or audio inputs with the click of a button! Demo uses the"
+         f" checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe audio files"
+         " of arbitrary length."
+     ),
+     allow_flagging="never",
+ )
+
+ yt_transcribe = gr.Interface(
+     fn=yt_transcribe,
+     inputs=[
+         gr.Textbox(lines=1, placeholder="Paste the URL to a YouTube video here", label="YouTube URL"),
+         gr.Radio(["transcribe", "translate"], label="Task", value="transcribe")
+     ],
+     outputs=["html", "text"],
+     title="Whisper Large V3: Transcribe YouTube",
+     description=(
+         "Transcribe long-form YouTube videos with the click of a button! Demo uses the checkpoint"
+         f" [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe video files of"
+         " arbitrary length."
+     ),
+     allow_flagging="never",
+ )
+
+ with demo:
+     gr.TabbedInterface([mf_transcribe, file_transcribe, yt_transcribe], ["Microphone", "Audio file", "YouTube"])
+
+ demo.queue().launch(ssr_mode=False)
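
For a quick sanity check of the updated ASR setup outside the Gradio UI, the snippet below mirrors the pipeline configuration and call pattern used in the new app.py. It is a minimal sketch, not part of the commit: "sample.wav" is a hypothetical local audio file, and it assumes torch, transformers, and ffmpeg are installed.

# Minimal sketch mirroring the updated app.py (not part of the commit).
# "sample.wav" is a hypothetical local audio file; ffmpeg must be installed
# so transformers can decode it.
import torch
from transformers import pipeline

MODEL_NAME = "openai/whisper-large-v3-turbo"
device = 0 if torch.cuda.is_available() else "cpu"

pipe = pipeline(
    task="automatic-speech-recognition",
    model=MODEL_NAME,
    chunk_length_s=30,
    device=device,
)

# Same call pattern as the transcribe() function in the new app.py.
result = pipe(
    "sample.wav",
    batch_size=8,
    generate_kwargs={"task": "transcribe"},
    return_timestamps=True,
)
print(result["text"])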