Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -10,6 +10,9 @@ from flores200_codes import flores_codes
 import tempfile
 import os
 
+global model_dict
+model_dict = load_models()
+
 MODEL_NAME = "openai/whisper-large-v2"
 BATCH_SIZE = 8
 FILE_LIMIT_MB = 1000
@@ -49,7 +52,7 @@ def transcribe(inputs, task):
         raise gr.Error("No audio file submitted! Please upload or record an audio file before submitting your request.")
 
     text = pipe(inputs, batch_size=BATCH_SIZE, generate_kwargs={"task": task}, return_timestamps=True)["text"]
-    translated_text =
+    translated_text = translation(source_lang, target_lang, text)["result"]
     return text, translated_text
 
 
@@ -106,8 +109,9 @@ def yt_transcribe(yt_url, task, max_filesize=75.0):
     inputs = {"array": inputs, "sampling_rate": pipe.feature_extractor.sampling_rate}
 
     text = pipe(inputs, batch_size=BATCH_SIZE, generate_kwargs={"task": task}, return_timestamps=True)["text"]
+    translated_text = translation(source_lang, target_lang, text)["result"]
 
-    return html_embed_str, text
+    return html_embed_str, text, translated_text
 
 
 lang_codes = list(flores_codes.keys())
@@ -163,7 +167,7 @@ yt_transcribe = gr.Interface(
         gr.inputs.Dropdown(lang_codes, default='English', label='Source Language'),
         gr.inputs.Dropdown(lang_codes, default='French', label='Target Language'),
     ],
-    outputs=["html", "text"],
+    outputs=["html", "text", "translated_text"],
     layout="horizontal",
     theme="huggingface",
    title="Whisper Large V2: Transcribe YouTube",
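The added lines call two helpers that are not shown in this diff: load_models(), assigned to the module-level model_dict, and translation(source_lang, target_lang, text), which the new call sites expect to return a dict with a "result" key. Below is a minimal sketch of what such helpers might look like, assuming the flores200_codes mapping drives an NLLB-200 translation model; the checkpoint name (facebook/nllb-200-distilled-600M), the "nllb" dict key, and the helper bodies are all assumptions, not code from this commit.

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

from flores200_codes import flores_codes  # display name -> FLORES-200 code (from the repo)


def load_models():
    # Assumed: load a single NLLB-200 checkpoint once at startup and reuse it for every request.
    name = "facebook/nllb-200-distilled-600M"  # hypothetical checkpoint choice
    model = AutoModelForSeq2SeqLM.from_pretrained(name)
    tokenizer = AutoTokenizer.from_pretrained(name)
    return {"nllb": (model, tokenizer)}


def translation(source_lang, target_lang, text):
    # Assumed signature and return shape, matching the call sites added in the diff:
    #   translation(source_lang, target_lang, text)["result"]
    # Relies on the module-level model_dict = load_models() that the commit adds.
    model, tokenizer = model_dict["nllb"]
    tokenizer.src_lang = flores_codes[source_lang]  # e.g. "eng_Latn"
    tgt_code = flores_codes[target_lang]            # e.g. "fra_Latn"
    encoded = tokenizer(text, return_tensors="pt")
    generated = model.generate(
        **encoded,
        forced_bos_token_id=tokenizer.convert_tokens_to_ids(tgt_code),
        max_length=512,
    )
    result = tokenizer.batch_decode(generated, skip_special_tokens=True)[0]
    return {"result": result}

Loading the checkpoint once at import time, as the added model_dict = load_models() line does, keeps the weights in memory across requests instead of reloading them inside each transcription call.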