File size: 1,612 Bytes
c9612c1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
import os
import tempfile

import gradio as gr
import moviepy.editor as mp
from transformers import pipeline

# Load Whisper model for speech-to-text.
# NOTE(review): whisper-large is a multi-GB checkpoint; it is downloaded and
# loaded once at import time, so startup is slow but per-request calls reuse it.
asr = pipeline("automatic-speech-recognition", model="openai/whisper-large")

# MarianMT or M2M100 for translation (multi-language).
# M2M100 picks the output language at generation time via forced_bos_token_id,
# so this one pipeline serves every target language code.
translator = pipeline("translation", model="facebook/m2m100_418M")

def generate_subtitles(video_path, target_language):
    """Transcribe a video's audio with Whisper and translate the transcript.

    Parameters
    ----------
    video_path : str
        Path to the input video file.
    target_language : str
        M2M100 language code for the translation target (e.g. 'fr', 'es').

    Returns
    -------
    str
        "Original: ...\\nTranslated: ..." combining the transcription and
        its translation.
    """
    # Extract audio to a unique temp WAV file: the previous fixed
    # "temp_audio.wav" name was a race condition under concurrent requests,
    # and the file was never cleaned up.
    tmp = tempfile.NamedTemporaryFile(suffix=".wav", delete=False)
    tmp.close()

    video = mp.VideoFileClip(video_path)
    try:
        video.audio.write_audiofile(tmp.name, codec='pcm_s16le')
    finally:
        # Release the file handles moviepy keeps open on the source video.
        video.close()

    try:
        # The ASR pipeline accepts a filesystem path directly; the previous
        # code passed an open binary file object, which is not a supported
        # input form for the pipeline API.
        transcription = asr(tmp.name)["text"]
    finally:
        os.remove(tmp.name)  # always clean up the temp audio

    # Reuse the module-level translator instead of re-loading the M2M100
    # model on every call (the old code constructed a fresh pipeline per
    # request). forced_bos_token_id selects M2M100's output language.
    translated_subtitles = translator(
        transcription,
        forced_bos_token_id=translator.tokenizer.get_lang_id(target_language),
    )[0]["translation_text"]

    # Return subtitles (text for now)
    return f"Original: {transcription}\nTranslated: {translated_subtitles}"

# Define Gradio interface
def subtitle_video(video_file, target_language):
    """Gradio callback: resolve the uploaded video to a path and subtitle it.

    Parameters
    ----------
    video_file : str or file-like
        Newer Gradio versions deliver a gr.Video input as a filepath string;
        older versions deliver a tempfile wrapper exposing a .name attribute.
        The original code assumed only the latter and crashed with
        AttributeError on a plain string.
    target_language : str
        M2M100 language code for the translation target.

    Returns
    -------
    str
        The combined original/translated subtitle text.
    """
    video_path = video_file if isinstance(video_file, str) else video_file.name
    return generate_subtitles(video_path, target_language)

# Gradio app layout
# Gradio app layout: build the input components first, then wire them to the
# subtitle_video callback with a plain-text output.
video_input = gr.Video(label="Upload Video")
language_input = gr.Textbox(
    label="Target Language Code (e.g., 'fr' for French, 'es' for Spanish)"
)

interface = gr.Interface(
    fn=subtitle_video,
    inputs=[video_input, language_input],
    outputs="text",
    title="Automatic Video Subtitler & Translator",
)

# Start the local Gradio server.
interface.launch()