import os
from io import StringIO
from threading import Lock
from typing import TextIO, Union

import torch
import tqdm
import whisper
from faster_whisper import WhisperModel

from .utils import ResultWriter, WriteTXT, WriteSRT, WriteVTT, WriteTSV, WriteJSON

ASR_ENGINE_OPTIONS = frozenset([
    "task",
    "language",
    "initial_prompt",
    "vad_filter",
    "word_timestamps",
])

# Model choice and download location are configured via environment variables;
# a single lock serialises access to the shared model instance.
model_name = os.getenv("ASR_MODEL", "small")
model_path = os.getenv("ASR_MODEL_PATH", os.path.join(os.path.expanduser("~"), ".cache", "whisper"))
model_lock = Lock()
model = None


def load_model(next_model_name: str):
    """Load the requested faster-whisper model, reusing the current one if it already matches."""
    with model_lock:
        global model_name, model
        if model and next_model_name == model_name:
            return model
        if torch.cuda.is_available():
            model = WhisperModel(model_size_or_path=next_model_name, device="cuda", compute_type="float32", download_root=model_path)
        else:
            model = WhisperModel(model_size_or_path=next_model_name, device="cpu", compute_type="int8", download_root=model_path)
        model_name = next_model_name
        return model


def transcribe(audio, asr_options, output):
    # Keep only the options that the ASR engine understands.
    options_dict = {k: v for k, v in asr_options.items() if k in ASR_ENGINE_OPTIONS}
    with model_lock:
        segments = []
        text = ""
        segment_generator, info = model.transcribe(audio, beam_size=5, **options_dict)
        # Consume the lazy segment generator, tracking progress in seconds of audio.
        with tqdm.tqdm(total=round(info.duration), unit='sec') as tqdm_pbar:
            for segment in segment_generator:
                segment_dict = segment._asdict()
                if segment.words:
                    segment_dict["words"] = [word._asdict() for word in segment.words]
                segments.append(segment_dict)
                text = text + segment.text
                tqdm_pbar.update(segment.end - segment.start)
        result = {
            "language": options_dict.get("language", info.language),
            "segments": segments,
            "text": text
        }

    # Render the result into an in-memory file in the requested output format.
    output_file = StringIO()
    write_result(result, output_file, output)
    output_file.seek(0)

    return output_file


def language_detection(audio):
    # Load audio and pad/trim it to fit 30 seconds.
    audio = whisper.pad_or_trim(audio)

    # Detect the spoken language by running a transcription pass under the lock.
    with model_lock:
        segments, info = model.transcribe(audio, beam_size=5)
        detected_lang_code = info.language

    return detected_lang_code


def write_result(
    result: dict, file: TextIO, output: Union[str, None]
):
    if output == "srt":
        WriteSRT(ResultWriter).write_result(result, file=file)
    elif output == "vtt":
        WriteVTT(ResultWriter).write_result(result, file=file)
    elif output == "tsv":
        WriteTSV(ResultWriter).write_result(result, file=file)
    elif output == "json":
        WriteJSON(ResultWriter).write_result(result, file=file)
    elif output == "txt":
        WriteTXT(ResultWriter).write_result(result, file=file)
    else:
        return 'Please select an output method!'

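
# Usage sketch: a minimal, hedged example of driving this module, not part of
# the service itself. It assumes the module is imported as part of its package
# (the relative ".utils" import requires that, e.g. via `python -m ...`), that
# "sample.wav" is a hypothetical audio file, and that ffmpeg is available so
# whisper.load_audio() can decode it.
if __name__ == "__main__":
    load_model(model_name)                     # initialise the shared model once
    audio = whisper.load_audio("sample.wav")   # hypothetical input file
    print("Detected language:", language_detection(audio))
    srt_output = transcribe(audio, {"task": "transcribe"}, "srt")
    print(srt_output.read())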