Switch to distil large
app.py CHANGED
@@ -3,21 +3,17 @@ import spaces
 import torch
 import io
 import os.path
-import urllib.request
+from huggingface_hub import hf_hub_download
 import whisper
 
-
-urllib.request.urlretrieve(
-    "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
-    "/home/user/.cache/whisper/small.en.pt"
-)
+model_path = hf_hub_download(repo_id="distil-whisper/distil-large-v3-openai", filename="model.bin")
 
 writer = whisper.utils.get_writer("srt", "/dev/null")
 
 @spaces.GPU
 def generate(file, progress=gr.Progress(track_tqdm=True)):
     # get file to type bytes somehow
-    model = whisper.load_model("small.en", device="cuda")
+    model = whisper.load_model(model_path, device="cuda")
     audio = whisper.load_audio(file)
     result = model.transcribe(audio, verbose=False)
     out = io.StringIO()
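For context, here is a minimal standalone sketch of the new loading path shown in the diff. It assumes the openai-whisper, huggingface_hub, and torch packages are installed; the repo_id and filename come from the diff itself, while the input path "sample.wav", the device fallback, and the segment-printing loop are illustrative additions rather than part of the Space's code.

import torch
import whisper
from huggingface_hub import hf_hub_download

# Fetch the distil-large-v3 checkpoint in OpenAI Whisper format from the Hub,
# replacing the old urllib download of small.en.pt into the whisper cache.
model_path = hf_hub_download(
    repo_id="distil-whisper/distil-large-v3-openai",
    filename="model.bin",
)

# whisper.load_model accepts a local checkpoint path as well as a model name.
# The Space calls this under @spaces.GPU with device="cuda"; the fallback here
# is only so the sketch also runs without a GPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
model = whisper.load_model(model_path, device=device)

# Decode the audio and transcribe it; the result dict carries "text" and "segments".
audio = whisper.load_audio("sample.wav")  # hypothetical input file
result = model.transcribe(audio, verbose=False)

for seg in result["segments"]:
    print(f"[{seg['start']:7.2f} -> {seg['end']:7.2f}] {seg['text'].strip()}")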