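"""Custom inference handler built on faster-whisper.

The handler accepts either a video/CDN link (processed into slides plus an
extracted audio track) or base64-encoded audio bytes, and returns the
transcription as a list of timestamped segments.
"""
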
import argparse
import base64
import io
import logging
import os

from faster_whisper import WhisperModel
from pydub import AudioSegment

from file_processor import process_video

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


def is_cdn_link(link_or_bytes):
    """Raw bytes are treated as audio data; any other input is assumed to be a link."""
    logging.info("Checking if the provided link is a CDN link...")
    if isinstance(link_or_bytes, bytes):
        return False
    return True


def get_audio_bytes(audio_path):
    """Load the audio file and return it as an in-memory MP3 buffer."""
    audio = AudioSegment.from_file(audio_path)
    buffer = io.BytesIO()
    audio.export(buffer, format='mp3')
    buffer.seek(0)
    return buffer


class EndpointHandler:
    """Endpoint handler wrapping a faster-whisper WhisperModel."""

    def __init__(self, path=""):
        self.model = WhisperModel("large-v2", num_workers=30)

    def __call__(self, data: dict[str, str]):
        inputs = data.pop("inputs")
        language = data.pop("language", "de")
        task = data.pop("task", "transcribe")

        response = {}
        audio_path = None
        if is_cdn_link(inputs):
            # Link input: extract slides and an audio track from the video.
            slides, audio_path = process_video(inputs)
            audio_bytes = get_audio_bytes(audio_path)
            slides_list = [slide.to_dict() for slide in slides]
            response.update({"slides": slides_list})
        else:
            # Bytes input: the payload is base64-encoded audio.
            audio_bytes_decoded = base64.b64decode(inputs)
            logging.debug(f"Decoded Bytes Length: {len(audio_bytes_decoded)}")
            audio_bytes = io.BytesIO(audio_bytes_decoded)

        logging.info("Running inference...")
        segments, info = self.model.transcribe(audio_bytes, language=language, task=task)
        full_text = []
        for segment in segments:
            full_text.append({"segmentId": segment.id,
                              "text": segment.text,
                              "timestamps": {
                                  "start": segment.start,
                                  "end": segment.end
                              }
                              })
            if segment.id % 100 == 0:
                logging.info("segment " + str(segment.id) + " transcribed")
        logging.info("Inference completed.")
        response.update({"audios": full_text})
        logging.debug(response)

        if audio_path:
            os.remove(audio_path)
        return response
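

# Sketch of the response shape produced by EndpointHandler.__call__ (illustrative
# values; derived from the dict built above):
# {
#     "slides": [...],  # only present for link inputs
#     "audios": [
#         {"segmentId": 1, "text": "...", "timestamps": {"start": 0.0, "end": 2.5}},
#     ],
# }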


if __name__ == '__main__':
    Parser = argparse.ArgumentParser(description="EndpointHandler")
    Parser.add_argument("-p", "--path")
    Parser.add_argument("-l", "--language", default="de")
    Parser.add_argument("-t", "--task", default="transcribe")
    Parser.add_argument("--type", default="video")
    Args = Parser.parse_args()

    handler = EndpointHandler()
    # Args.path = r"C:\Users\mbabu\AppData\Local\Temp\tmpsezkw2i5.mp3"
    # Args.path = "https://rr2---sn-4g5lzned.googlevideo.com/videoplayback?expire=1703474038&ei=Fp-IZeagJPaii9oPn4i3wAo&ip=195.146.4.71&id=o-ABVj7-vfJlewWZrzUMc466iPpKFkv2lNf6sHtRJ3F7s7&itag=22&source=youtube&requiressl=yes&xpc=EgVo2aDSNQ%3D%3D&mh=O5&mm=31%2C29&mn=sn-4g5lzned%2Csn-4g5ednsr&ms=au%2Crdu&mv=m&mvi=2&pl=25&initcwndbps=503750&spc=UWF9f8xjKc9m59EXQeaCQlNJLtGZUcI&vprv=1&svpuc=1&mime=video%2Fmp4&cnr=14&ratebypass=yes&dur=108.135&lmt=1701260382315374&mt=1703452143&fvip=1&fexp=24007246&c=ANDROID&txp=5308224&sparams=expire%2Cei%2Cip%2Cid%2Citag%2Csource%2Crequiressl%2Cxpc%2Cspc%2Cvprv%2Csvpuc%2Cmime%2Ccnr%2Cratebypass%2Cdur%2Clmt&sig=AJfQdSswRQIgP2osevilYOsP2YAHKqjlgQZVbT_UEzktBYVyOLUg4QgCIQDJdcRA_SOMBhwrMMVAkGkVyoho7rm99Y-io9xs1cVEsg%3D%3D&lsparams=mh%2Cmm%2Cmn%2Cms%2Cmv%2Cmvi%2Cpl%2Cinitcwndbps&lsig=AAO5W4owRgIhAJZ4g0mJIJzQ-5nvrNk5hdZQDzCfvifDuWfDXUu1tS0QAiEAuo-TgZtkwGUxLejKR7J_f2jU-aCV2pS8kx4Yl8zOQBc%3D&title=An%20alle%20Lehrkr%C3%A4fte%3A%20Finanzielle%20Bildung%20mit%20Daniel%20Jung%20%F0%9F%8E%93"
    if is_cdn_link(Args.path):
        test_inputs = Args.path
    else:
        audio = AudioSegment.from_mp3(Args.path)
        buffer = io.BytesIO()
        audio.export(buffer, format="mp3")
        mp3_bytes = buffer.getvalue()
        test_inputs = base64.b64encode(mp3_bytes)
    sample_data = {
        "inputs": test_inputs,
        "language": Args.language,
        "task": Args.task,
    }
    test = handler(sample_data)
    print(test)
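
# Example local run (hypothetical file path; adjust to your environment):
#   python handler.py -p ./sample.mp3 -l de -t transcribe
# Passing a video/CDN link as --path instead routes the input through process_video().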