File size: 3,109 Bytes
0ebc3c4 2e38275 8e972b4 89b6dcb 2224f3b 89b6dcb 8e972b4 2224f3b 4498d9d 8f3a4e5 2e38275 8f3a4e5 fb39589 98acae8 c6b75bd 2e38275 89b6dcb 8e972b4 89b6dcb aac6c7c 2e38275 77f594c 2e38275 3b6cf03 2e38275 2224f3b 77f594c fb39589 8e972b4 2224f3b 9fc4674 bc1258a 2e38275 bc1258a 13a5199 98acae8 77f594c 968d2c9 bc1258a 98acae8 89b6dcb 8e972b4 89b6dcb 98acae8 89b6dcb 2224f3b 1e541e6 2224f3b 77f594c 89b6dcb 77f594c f8c62cb |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 |
import os
import pathlib
import shutil
import subprocess
import sys
import uuid
from dataclasses import dataclass
from pathlib import Path

import gradio as gr
import opengraph
import requests
from moviepy.editor import AudioFileClip
# Root directory for every generated artifact. Resolved to an absolute path
# up front so the later os.chdir() in __main__ can't break relative lookups.
output_dir = Path("temp/").absolute()
output_dir.mkdir(parents=True, exist_ok=True)
@dataclass
class AudioInput:
    """A slice of an audio file to turn into a video clip.

    Attributes:
        path: Filesystem path to the source audio file.
        start_time: Offset into the audio, in seconds, where the clip starts.
        run_for: Duration of the clip, in seconds.
    """

    # Dataclass keeps the original positional constructor
    # AudioInput(path, start_time, run_for) and adds __repr__/__eq__ for free.
    path: str
    start_time: int
    run_for: int
def process_inputs(
    prompt: str, audio_path: str, spotify_url: str, start_time: int, run_for: int
) -> str:
    """Gradio entry point: build an animated video for an audio clip.

    Args:
        prompt: Text description fed to Stable Diffusion.
        audio_path: Path of an uploaded mp3 (may be unused if a Spotify
            URL is given, which overrides it).
        spotify_url: Optional Spotify episode link; when set, the episode
            audio and cover image are downloaded and used instead.
        start_time: Clip start offset in seconds.
        run_for: Clip duration in seconds.

    Returns:
        The rendered video (whatever ``animate_images`` returns).
    """
    audio_input = AudioInput(audio_path, start_time, run_for)
    # BUG FIX: spotify_image was previously assigned only inside the branch
    # below, so a plain mp3 upload (empty spotify_url) raised
    # UnboundLocalError at the animate_images() call. Default it here.
    # NOTE(review): animate_images() calls .as_posix() on this value, so a
    # None overlay will still fail downstream — confirm whether a no-overlay
    # path is supported in animate.py.
    spotify_image = None
    if spotify_url:
        # NOTE(review): SpotifyApi is not imported in this file — confirm it
        # is provided elsewhere in the project.
        spotify = SpotifyApi(spotify_url)
        audio_input.path = spotify.download_episode()
        spotify_image = spotify.download_image()
    images = get_stable_diffusion_images(prompt)
    video = animate_images(images, audio_input, spotify_image)
    return video
def animate_images(
    image_paths: list[str], audio_input: AudioInput, overlay_image_path: str
) -> str:
    """Interpolate the still images into video frames and mux in the audio.

    Args:
        image_paths: Paths of the generated still images to animate between.
        audio_input: Source audio plus the start offset / duration to use.
        overlay_image_path: Image composited onto the video.
            NOTE(review): annotated as ``str``, but ``.as_posix()`` is called
            on it below, which requires a ``pathlib.Path`` — confirm what
            callers actually pass.

    Returns:
        Whatever ``create_mp4_with_audio`` returns — presumably the path of
        the finished mp4; verify against ``animate.py``.
    """
    from animate import ( # Only import after git clone and when necessary takes loooong
        create_mp4_with_audio,
        get_video_frames,
    )
    # Generate a random folder name and change directories to there
    foldername = str(uuid.uuid4())[:8]
    vid_output_dir = Path(output_dir / foldername)
    vid_output_dir.mkdir(exist_ok=True, parents=True)
    # Trim the audio to [start_time, start_time + run_for) seconds.
    audio_clip = AudioFileClip(audio_input.path)
    audio_clip = audio_clip.subclip(
        audio_input.start_time, audio_input.start_time + audio_input.run_for
    )
    video_frames, cv2_images = get_video_frames(image_paths, vid_output_dir)
    path = Path(vid_output_dir / "output_final.mp4")
    return create_mp4_with_audio(
        video_frames,
        cv2_images,
        audio_clip.duration,
        audio_clip,
        path,
        overlay_image_path.as_posix(),
    )
def get_stable_diffusion_images(prompt: str) -> list[str]:
    """Generate images for *prompt* via the hosted Stable Diffusion space.

    Args:
        prompt: Text description to render.

    Returns:
        Full paths of (at most) the first two images found in the gallery
        directory the space wrote its results to.
        (Fixed: the return annotation previously claimed ``str`` while the
        function returns a list of paths.)
    """
    stable_diffusion = gr.Blocks.load(name="spaces/runwayml/stable-diffusion-v1-5")
    # fn_index=1 selects the space's text-to-image endpoint; it returns the
    # directory it saved the generated images into.
    gallery_dir = stable_diffusion(prompt, fn_index=1)
    return [os.path.join(gallery_dir, img) for img in os.listdir(gallery_dir)][:2]
# Gradio UI: a text prompt plus either an uploaded mp3 or a Spotify episode
# link, with the clip window in seconds; produces a single output video.
_interface_inputs = [
    gr.Textbox(label="Describe your podcast clip"),
    gr.Audio(type="filepath", label="Upload an mp3"),
    gr.Textbox(label="Or Paste a spotify episode link"),
    gr.Number(label="Start time (in seconds)"),
    gr.Number(label="Run for (in seconds)"),
]
iface = gr.Interface(fn=process_inputs, inputs=_interface_inputs, outputs="video")
if __name__ == "__main__":
    # Show gradio version
    print(f"Gradio version: {gr.__version__}")
    # Fetch Google's frame-interpolation code, which animate.py imports at
    # call time. FIX: skip the clone when the checkout already exists, so
    # repeated launches don't re-run (and error on) `git clone`.
    repo_dir = Path("frame_interpolation")
    if not repo_dir.exists():
        subprocess.call(
            [
                "git",
                "clone",
                "https://github.com/google-research/frame-interpolation",
                "frame_interpolation",
            ]
        )
    # FIX: append an *absolute* path. The relative entry "frame_interpolation"
    # would resolve against the cwd at import time, and os.chdir(output_dir)
    # below changes the cwd before `from animate import ...` ever runs.
    sys.path.append(str(repo_dir.absolute()))
    # My installs
    os.chdir(
        output_dir
    )  # change working directory to output_dir because the hf spaces model has no option to specify output directory ¯\_(ツ)_/¯
    iface.launch()
|