dubai/models/whisper.py
fountai's picture
mimic
1fe2f2f
raw
history blame
274 Bytes
"""Module-level faster-whisper model setup.

Instantiates a medium Whisper model once at import time, on GPU with
float16 when CUDA is available, otherwise on CPU with int8 quantization.
Importing this module triggers the (possibly slow) model load/download.
"""
from faster_whisper import WhisperModel
import torch

# Query CUDA availability exactly once so `device` and `compute_type`
# can never disagree (the original called is_available() twice).
_cuda_available = torch.cuda.is_available()

device = "cuda" if _cuda_available else "cpu"
# float16 is the fast GPU path; int8 keeps CPU inference memory-friendly.
compute_type = "float16" if _cuda_available else "int8"
model_size = "medium"

# Shared singleton; downloads weights on first use if not cached locally.
model = WhisperModel(model_size, device=device, compute_type=compute_type)