Spaces:
Sleeping
Sleeping
LittleLirow
committed on
Commit
·
9d6eb99
1
Parent(s):
7f53786
Make output video display properly
Browse files
app.py
CHANGED
@@ -10,13 +10,13 @@ def generate_video(text, auth_openai, auth_elevenlabs, auth_replicate, auth_rev)
|
|
10 |
generated_story = story.text2story(text, auth_openai)
|
11 |
narrator.text2voice(generated_story, "audio_out.mp3", auth_elevenlabs, 5)
|
12 |
deforum_str, max_frames = subtitles.audio2subtitle(auth_rev)
|
13 |
-
|
14 |
# generated_music = bgm.text2audio(text=text, duration=20, guidance_scale=5, random_seed=24, n_candidates=3)
|
15 |
-
generated_video = join.join_artifacts()
|
16 |
|
|
|
17 |
return generated_video
|
18 |
|
19 |
-
def download_video(
|
20 |
pass
|
21 |
|
22 |
with gr.Blocks() as demo:
|
@@ -37,6 +37,6 @@ with gr.Blocks() as demo:
|
|
37 |
download_button = gr.Button("Download")
|
38 |
|
39 |
generate_button.click(generate_video, inputs=[prompt_input, auth_openai_input, auth_eleven_input, auth_replicate_input, auth_rev_input], outputs=[video_out])
|
40 |
-
download_button.click(download_video, inputs=video_out)
|
41 |
|
42 |
demo.launch(debug=True, enable_queue=True)
|
|
|
10 |
generated_story = story.text2story(text, auth_openai)
|
11 |
narrator.text2voice(generated_story, "audio_out.mp3", auth_elevenlabs, 5)
|
12 |
deforum_str, max_frames = subtitles.audio2subtitle(auth_rev)
|
13 |
+
animation.story2video(deforum_str, max_frames, auth_replicate)
|
14 |
# generated_music = bgm.text2audio(text=text, duration=20, guidance_scale=5, random_seed=24, n_candidates=3)
|
|
|
15 |
|
16 |
+
generated_video = join.join_artifacts()
|
17 |
return generated_video
|
18 |
|
19 |
+
def download_video(video_path):
    """Placeholder handler for the "Download" button; currently a no-op.

    video_path: value of the `video_out` Gradio component passed in by
        `download_button.click(download_video, inputs=[video_out])`
        (unused for now).
    """
    pass
|
21 |
|
22 |
with gr.Blocks() as demo:
|
|
|
37 |
download_button = gr.Button("Download")
|
38 |
|
39 |
generate_button.click(generate_video, inputs=[prompt_input, auth_openai_input, auth_eleven_input, auth_replicate_input, auth_rev_input], outputs=[video_out])
|
40 |
+
download_button.click(download_video, inputs=[video_out])
|
41 |
|
42 |
demo.launch(debug=True, enable_queue=True)
|
join.py
CHANGED
@@ -1,6 +1,9 @@
|
|
1 |
import ffmpeg
|
|
|
2 |
|
3 |
def join_artifacts():
|
4 |
audio_in = ffmpeg.input('audio_out.mp3')
|
5 |
video_in = ffmpeg.input('video_out.mp4')
|
6 |
-
ffmpeg.concat(video_in.filter("subtitles", "audio_out.srt"), audio_in, v=1, a=1).output('av_out.mp4').run()
|
|
|
|
|
|
1 |
import ffmpeg
|
2 |
+
import os
|
3 |
|
4 |
def join_artifacts():
    """Mux the generated narration audio onto the generated video.

    Reads 'audio_out.mp3' and 'video_out.mp4' (paths resolved by ffmpeg
    relative to the current working directory), burns in the subtitles
    from 'audio_out.srt', and writes the combined result to 'av_out.mp4'.

    Returns:
        The path to 'av_out.mp4' located next to this module file.

    NOTE(review): ffmpeg writes 'av_out.mp4' relative to the process CWD,
    while the returned path is built from this file's directory — the two
    only coincide when the app is launched from its own directory; confirm.
    """
    audio_in = ffmpeg.input('audio_out.mp3')
    video_in = ffmpeg.input('video_out.mp4')
    # Burn subtitles into the video stream, then combine with the audio
    # stream; v=1, a=1 selects one video and one audio stream for output.
    ffmpeg.concat(video_in.filter("subtitles", "audio_out.srt"), audio_in, v=1, a=1).output('av_out.mp4').run()
    av_path = os.path.join(os.path.dirname(__file__), "av_out.mp4")
    return av_path
|