# whisper_transcription_api/services/whisper_service.py
import whisper
import torch
# Use an NVIDIA GPU if available, otherwise fall back to the CPU
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
# Load the Whisper model
model = whisper.load_model("base", device=DEVICE)
def transcribe_audio(file_path: str) -> str:
    """Transcribes the given audio file and returns the text."""
    result = model.transcribe(file_path)
    return result["text"]
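
# Minimal usage sketch. The file name "sample.wav" is hypothetical and only
# illustrates how the service function might be called directly; in the API
# it would typically receive a path to an uploaded audio file instead.
if __name__ == "__main__":
    text = transcribe_audio("sample.wav")
    print(text)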