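"""Lazy, cached loaders for Whisper models on a Hugging Face ZeroGPU Space.

Both the transformers implementation and the openai-whisper package model are
kept in module-level globals so repeated @spaces.GPU calls reuse the
already-loaded weights instead of downloading them again.
"""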
import torch
from transformers import WhisperProcessor, WhisperForConditionalGeneration
import whisper
from config import WHISPER_MODEL_SIZE
import spaces

# Module-level caches so each model is loaded at most once per process
whisper_processor = None
whisper_model = None
whisper_model_small = None

@spaces.GPU
def load_models():
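    """Download and cache all processors/models not loaded yet (idempotent)."""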
    global whisper_processor, whisper_model, whisper_model_small
    if whisper_processor is None:
        whisper_processor = WhisperProcessor.from_pretrained(f"openai/whisper-{WHISPER_MODEL_SIZE}")
    if whisper_model is None:
        whisper_model = WhisperForConditionalGeneration.from_pretrained(f"openai/whisper-{WHISPER_MODEL_SIZE}")
    if whisper_model_small is None:
        whisper_model_small = whisper.load_model(WHISPER_MODEL_SIZE)

@spaces.GPU
def get_device():
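    """Return 'cuda' if a CUDA GPU is visible to this process, otherwise 'cpu'."""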
    return "cuda" if torch.cuda.is_available() else "cpu"

@spaces.GPU
def get_processor():
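    """Return the cached WhisperProcessor, loading it on first use."""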
    global whisper_processor
    if whisper_processor is None:
        load_models()
    return whisper_processor

@spaces.GPU
def get_model():
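    """Return the cached transformers Whisper model, moved to the current device."""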
    global whisper_model
    if whisper_model is None:
        load_models()
    return whisper_model.to(get_device())

@spaces.GPU
def get_whisper_model_small():
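    """Return the cached openai-whisper package model of the same size."""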
    global whisper_model_small
    if whisper_model_small is None:
        load_models()
    return whisper_model_small
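

# A minimal usage sketch (an assumption for illustration, not part of this
# module's public API): `audio` is a 16 kHz mono float32 numpy array, and the
# calls follow the standard transformers Whisper processor/model API.
#
#     processor = get_processor()
#     model = get_model()
#     inputs = processor(audio, sampling_rate=16000, return_tensors="pt")
#     predicted_ids = model.generate(inputs.input_features.to(get_device()))
#     text = processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]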