import whisper
import torch

# Select the best available compute device: use the NVIDIA GPU when CUDA is
# available, otherwise fall back to CPU inference (slower but always works).
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# Load the Whisper "base" model once at import time so repeated transcription
# calls reuse the same in-memory model instead of reloading the weights.
# NOTE(review): loading at module import is a side effect; downloading/loading
# the weights can take noticeable time on first run.
model = whisper.load_model("base", device=DEVICE)
def transcribe_audio(file_path: str) -> str:
    """Transcribe the given audio file and return the recognized text.

    Args:
        file_path: Path to an audio file in a format ffmpeg can decode
            (e.g. wav, mp3, m4a).

    Returns:
        The transcribed text as a single string.

    Raises:
        Whatever ``whisper``/``ffmpeg`` raise for missing or unreadable
        files (e.g. ``RuntimeError``) — errors are deliberately not
        swallowed here so callers can handle them.
    """
    # model.transcribe handles audio loading/resampling internally and
    # returns a dict; the full text lives under the "text" key.
    result = model.transcribe(file_path)
    return result["text"]