import gradio as gr
import os
import sys
import subprocess
# from moviepy.editor import VideoFileClip
import whisper
from whisper.utils import write_vtt

# Load the medium Whisper model once at startup so it is reused across requests
model = whisper.load_model("medium")


def video2mp3(video_file, output_ext="mp3"):
    """Extract the audio track from a video file with ffmpeg and return its path."""
    filename, ext = os.path.splitext(video_file)
    subprocess.call(
        ["ffmpeg", "-y", "-i", video_file, f"{filename}.{output_ext}"],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.STDOUT,
    )
    return f"{filename}.{output_ext}"


def translate(input_video):
    """Transcribe the video's audio with Whisper and translate it to English."""
    audio_file = video2mp3(input_video)
    options = dict(beam_size=5, best_of=5)
    translate_options = dict(task="translate", **options)
    result = model.transcribe(audio_file, **translate_options)
    return result["text"]


block = gr.Blocks()

with block:
    with gr.Group():
        with gr.Box():
            with gr.Row().style():
                inp_video = gr.Video(
                    label="Input Video",
                    type="filepath",
                    mirror_webcam=False,
                )
                op_video = gr.Textbox()  # displays the translated transcript
        btn = gr.Button("Generate Subtitle Video")

    btn.click(translate, inputs=[inp_video], outputs=[op_video])

block.launch(enable_queue=True, debug=True)
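
# Note: write_vtt is imported above but never called, even though the button
# promises a subtitle video. Below is a minimal sketch of how the segments
# returned by model.transcribe() could be written out as a WebVTT subtitle
# file with that helper. The function name generate_vtt is an assumption, not
# part of the original app, and it would need to be defined and called from
# translate() before block.launch() rather than down here.
#
#   def generate_vtt(result, audio_file):
#       """Write Whisper's segments to a .vtt file next to the audio file."""
#       vtt_path = os.path.splitext(audio_file)[0] + ".vtt"
#       with open(vtt_path, "w", encoding="utf-8") as f:
#           write_vtt(result["segments"], file=f)
#       return vtt_path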