Commit 66efbc3
Parent(s): 9d6fa91
Update app.py
app.py CHANGED
@@ -6,6 +6,8 @@ from transformers import pipeline
 
 MODEL_NAME = "openai/whisper-large-v2"
 BATCH_SIZE = 8
+FILE_LIMIT_MB = 1000
+YT_ATTEMPT_LIMIT = 3
 
 device = 0 if torch.cuda.is_available() else "cpu"
 
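The two new constants cap the size of uploaded audio and bound how often the YouTube fetch is retried. The pipeline setup that consumes MODEL_NAME, BATCH_SIZE, and device sits outside this hunk, so the following is only a sketch of the assumed wiring; pipe and chunk_length_s=30 are not taken from this commit:

import torch
from transformers import pipeline

MODEL_NAME = "openai/whisper-large-v2"
BATCH_SIZE = 8
FILE_LIMIT_MB = 1000     # new: cap on uploaded/recorded audio size, in MB
YT_ATTEMPT_LIMIT = 3     # new: retry budget for flaky pytube fetches

device = 0 if torch.cuda.is_available() else "cpu"

# Assumed wiring, not part of this commit: build the ASR pipeline once at startup.
pipe = pipeline(
    task="automatic-speech-recognition",
    model=MODEL_NAME,
    chunk_length_s=30,
    device=device,
)

# BATCH_SIZE would then be passed per call, e.g. pipe(audio_path, batch_size=BATCH_SIZE).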
@@ -31,7 +33,13 @@ def transcribe(microphone, file_upload, task):
         )
 
     elif (microphone is None) and (file_upload is None):
-
+        raise gr.Error("You have to either use the microphone or upload an audio file")
+
+    file_size_mb = os.stat(inputs).st_size / (1024 * 1024)
+    if file_size_mb > FILE_LIMIT_MB:
+        raise gr.Error(
+            f"File size exceeds file size limit. Got file of size {file_size_mb:.2f}MB for a limit of {FILE_LIMIT_MB}MB."
+        )
 
     file = microphone if microphone is not None else file_upload
 
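In transcribe(), the elif branch now raises gr.Error when neither the microphone nor an upload is provided, and a new guard rejects audio larger than FILE_LIMIT_MB. The guard reads os.stat(inputs) while the surrounding lines use microphone, file_upload, and file, so inputs presumably comes from elsewhere in app.py. A minimal standalone sketch of the same check, with an explicit path argument and ValueError standing in for gr.Error so it runs outside Gradio (check_audio_size is an assumed name, not the Space's code):

import os

FILE_LIMIT_MB = 1000

def check_audio_size(path, limit_mb=FILE_LIMIT_MB):
    """Return the size of `path` in MB, raising if it exceeds the limit."""
    file_size_mb = os.stat(path).st_size / (1024 * 1024)
    if file_size_mb > limit_mb:
        raise ValueError(
            f"File size exceeds file size limit. Got file of size {file_size_mb:.2f}MB "
            f"for a limit of {limit_mb}MB."
        )
    return file_size_mb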
@@ -51,11 +59,20 @@ def _return_yt_html_embed(yt_url):
     return HTML_str
 
 
-def yt_transcribe(yt_url, task):
+def yt_transcribe(yt_url, task, max_filesize=75.0):
     yt = pt.YouTube(yt_url)
     html_embed_str = _return_yt_html_embed(yt_url)
-
-
+    for attempt in range(YT_ATTEMPT_LIMIT):
+        try:
+            yt = pytube.YouTube(yt_url)
+            stream = yt.streams.filter(only_audio=True)[0]
+            break
+        except KeyError:
+            if attempt + 1 == YT_ATTEMPT_LIMIT:
+                raise gr.Error("An error occurred while loading the YouTube video. Please try again.")
+
+    if stream.filesize_mb > max_filesize:
+        raise gr.Error(f"Maximum YouTube file size is {max_filesize}MB, got {stream.filesize_mb:.2f}MB.")
 
     pipe.model.config.forced_decoder_ids = [[2, transcribe_token_id if task=="transcribe" else translate_token_id]]
 
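yt_transcribe() gains a max_filesize parameter and a retry loop around pytube, which can intermittently raise KeyError while fetching stream metadata; after YT_ATTEMPT_LIMIT failed attempts the error is surfaced as a gr.Error. The unchanged yt = pt.YouTube(yt_url) line above the loop suggests app.py imports pytube under both names, or that the earlier construction is now redundant. Below is a standalone sketch of the same flow, with fetch_audio_stream as an assumed helper name and RuntimeError standing in for gr.Error:

import pytube

YT_ATTEMPT_LIMIT = 3

def fetch_audio_stream(yt_url, max_filesize=75.0):
    """Retry pytube a few times, return the first audio-only stream if it fits the size cap."""
    stream = None
    for attempt in range(YT_ATTEMPT_LIMIT):
        try:
            yt = pytube.YouTube(yt_url)
            stream = yt.streams.filter(only_audio=True)[0]
            break
        except KeyError:
            # pytube can fail transiently while parsing the watch page
            if attempt + 1 == YT_ATTEMPT_LIMIT:
                raise RuntimeError("An error occurred while loading the YouTube video. Please try again.")
    if stream.filesize_mb > max_filesize:
        raise RuntimeError(f"Maximum YouTube file size is {max_filesize}MB, got {stream.filesize_mb:.2f}MB.")
    return stream

The final context line is unchanged: position 2 of forced_decoder_ids is Whisper's task slot, pinned to the transcribe or translate token id depending on the task argument.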