"""Lazy-loading singletons for Whisper models on a Hugging Face Space.

Two model stacks are kept side by side:
  * the ``transformers`` pipeline pieces (``WhisperProcessor`` +
    ``WhisperForConditionalGeneration``), and
  * the native ``openai-whisper`` model (``whisper.load_model``),
both sized by ``WHISPER_MODEL_SIZE`` from the project's ``config`` module.

All loaders are idempotent: each global is populated at most once and
reused on every later call.
"""

import torch
from transformers import WhisperProcessor, WhisperForConditionalGeneration
import whisper
from config import WHISPER_MODEL_SIZE
import spaces

# Module-level singletons, populated on first use by load_models().
whisper_processor = None    # transformers WhisperProcessor
whisper_model = None        # transformers WhisperForConditionalGeneration
whisper_model_small = None  # native openai-whisper model


@spaces.GPU
def load_models():
    """Populate any of the three model singletons that are still unset.

    Safe to call repeatedly: each global is only loaded when it is
    still ``None``, so subsequent calls are cheap no-ops.
    """
    global whisper_processor, whisper_model, whisper_model_small
    if whisper_processor is None:
        whisper_processor = WhisperProcessor.from_pretrained(
            f"openai/whisper-{WHISPER_MODEL_SIZE}"
        )
    if whisper_model is None:
        whisper_model = WhisperForConditionalGeneration.from_pretrained(
            f"openai/whisper-{WHISPER_MODEL_SIZE}"
        )
    if whisper_model_small is None:
        whisper_model_small = whisper.load_model(WHISPER_MODEL_SIZE)


@spaces.GPU
def get_device():
    """Return ``"cuda"`` when a CUDA device is visible, else ``"cpu"``.

    NOTE(review): decorated with ``@spaces.GPU`` — presumably so that on
    ZeroGPU Spaces the availability check runs inside a GPU-allocated
    context (outside one, CUDA is not visible). Confirm this is intended.
    """
    return "cuda" if torch.cuda.is_available() else "cpu"


@spaces.GPU
def get_processor():
    """Return the shared ``WhisperProcessor``, loading it on first call."""
    global whisper_processor
    if whisper_processor is None:
        load_models()
    return whisper_processor


@spaces.GPU
def get_model():
    """Return the shared seq2seq Whisper model, moved to the current device.

    ``nn.Module.to`` moves parameters in place and returns the same
    object, so calling this repeatedly after the first move is a cheap
    no-op rather than a fresh copy.
    """
    global whisper_model
    if whisper_model is None:
        load_models()
    return whisper_model.to(get_device())


@spaces.GPU
def get_whisper_model_small():
    """Return the shared native openai-whisper model, loading it on first call."""
    global whisper_model_small
    if whisper_model_small is None:
        load_models()
    return whisper_model_small