Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -1,52 +1,46 @@
 import gradio as gr
 import whisper
 import os
-from moviepy.editor import VideoFileClip, TextClip, CompositeVideoClip
 
 # Load the Whisper model
 model = whisper.load_model("base")  # Choose 'tiny', 'base', 'small', 'medium', or 'large'
 
-def transcribe_video(video_file):
+def write_srt(transcription, output_file):
+    with open(output_file, "w") as f:
+        for i, segment in enumerate(transcription['segments']):
+            start = segment['start']
+            end = segment['end']
+            text = segment['text']
+            # Format timestamps for SRT
+            start_time = whisper.utils.format_timestamp(start)
+            end_time = whisper.utils.format_timestamp(end)
+            print(f"Writing subtitle {i + 1}: {text.strip()} ({start_time} --> {end_time})")  # Debug print
+            f.write(f"{i + 1}\n")
+            f.write(f"{start_time} --> {end_time}\n")
+            f.write(f"{text.strip()}\n\n")
+
+def transcribe_video(video_file, language):
     # Transcribe the video to generate subtitles
-    result = model.transcribe(video_file)
-
-    # Create a list of (start_time, end_time, text) tuples for subtitles
-    subtitles = [(segment['start'], segment['end'], segment['text'].strip()) for segment in result['segments']]
-
-    # Create a subtitled video
-    subtitled_video_file = create_subtitled_video(video_file, subtitles)
+    result = model.transcribe(video_file, language=language)
 
-
-
-def create_subtitled_video(video_file, subtitles):
-    # Load the original video
-    video = VideoFileClip(video_file)
-
-    # Create a list of TextClips for each subtitle
-    text_clips = []
-    for start, end, text in subtitles:
-        text_clip = (TextClip(text, fontsize=24, color='white', bg_color='black', size=video.size)
-                     .set_start(start)
-                     .set_duration(end - start)
-                     .set_position(('center', 'bottom')))  # Position the subtitle at the bottom center
-        text_clips.append(text_clip)
-
-    # Overlay the subtitles on the video
-    final_video = CompositeVideoClip([video] + text_clips)
+    # Save the transcription to an .srt file
+    srt_file = "generated_subtitles.srt"
 
-    #
-
-    final_video.write_videofile(subtitled_video_file, codec='libx264', audio_codec='aac')
+    # Write the transcription as subtitles
+    write_srt(result, srt_file)
 
-    return
+    return srt_file
 
 # Gradio interface
 iface = gr.Interface(
     fn=transcribe_video,
-    inputs=
-
+    inputs=[
+        gr.File(label="Upload Video"),
+        gr.Dropdown(label="Select Language", choices=["en", "es", "fr", "de", "it", "pt"], value="en")
+    ],
+    outputs=gr.File(label="Download Subtitles"),
     title="Video Subtitle Generator",
-    description="Upload a video file to generate
+    description="Upload a video file to generate subtitles using Whisper. Select the language of the video."
 )
 
 if __name__ == "__main__":
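
A note on the timestamp helper used in write_srt: in openai-whisper, whisper.utils.format_timestamp defaults to omitting the hours field when it is zero and uses a period as the decimal marker, whereas the SRT format expects HH:MM:SS,mmm. Many players tolerate the looser form, but a stricter variant is available through the function's optional arguments. A minimal sketch, assuming the whisper import above is the openai-whisper package:

    # Sketch: strict SRT-style timestamps via format_timestamp's optional arguments.
    from whisper.utils import format_timestamp

    def srt_timestamp(seconds: float) -> str:
        # Always include the hours field and use a comma before the milliseconds,
        # which is what SRT expects (e.g. 83.456 -> "00:01:23,456").
        return format_timestamp(seconds, always_include_hours=True, decimal_marker=",")

Newer openai-whisper releases also ship writer helpers (e.g. whisper.utils.get_writer("srt", output_dir)) that handle this formatting without a hand-rolled loop.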
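Separately, and only as an assumption about why the Space still shows a runtime error: depending on the Gradio version, gr.File may hand the function a temp-file wrapper object rather than a plain path string, while model.transcribe expects a file path (Whisper also needs a system ffmpeg to decode the upload, which on Spaces usually means a packages.txt entry). A hedged sketch of a more defensive transcribe_video, otherwise mirroring the committed function:

    # Sketch only: normalize whatever gr.File delivers to a filesystem path
    # before handing it to Whisper; the rest follows the committed function.
    def transcribe_video(video_file, language):
        video_path = video_file if isinstance(video_file, str) else video_file.name
        result = model.transcribe(video_path, language=language)

        srt_file = "generated_subtitles.srt"
        write_srt(result, srt_file)
        return srt_file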