|
import os
from glob import glob

import gradio as gr
import spaces
import torch
from PIL import Image

from diffusers import DiffusionPipeline
from diffusers.utils import export_to_video
|
|
|
|
|
# Load the Stable Video Diffusion image-to-video pipeline from the Hugging Face Hub.
# NOTE(review): no torch_dtype or device is specified here, so the weights load in
# float32 on CPU; confirm whether a .to("cuda")/fp16 setup is expected to happen
# inside the GPU-decorated handler on the target deployment.
pipeline = DiffusionPipeline.from_pretrained("stabilityai/stable-video-diffusion-img2vid-xt")
|
|
|
|
|
# Gradio UI: a single uploaded image in, a generated video file out.
# `fn` is wrapped in a lambda so that `generate_video` (defined later in this
# file) is resolved lazily at call time rather than at construction time.
image_input = gr.Image(type="pil")
video_output = gr.Video()

interface = gr.Interface(
    fn=lambda uploaded_image: generate_video(uploaded_image),
    inputs=image_input,
    outputs=video_output,
    title="Stable Video Diffusion",
    description="Upload an image to generate a video",
    theme="soft",
)
|
|
|
@spaces.GPU(duration=360)
def generate_video(image):
    """
    Generate a short video from a single input image.

    Args:
        image: A PIL Image object used to condition the video diffusion
            pipeline.

    Returns:
        The path (str) of the exported .mp4 file under ``outputs/``.
    """
    # The Stable Video Diffusion pipeline returns generated frames in
    # ``.frames`` (one list of PIL images per prompt), not ``.images`` —
    # the original ``.images`` access raised AttributeError at runtime.
    result = pipeline(image=image, num_inference_steps=10)
    video_frames = result.frames[0]

    # Write to a sequentially numbered file so earlier outputs are preserved.
    # ``glob`` comes from the top-of-file ``from glob import glob`` import.
    os.makedirs("outputs", exist_ok=True)
    base_count = len(glob(os.path.join("outputs", "*.mp4")))
    video_path = os.path.join("outputs", f"{base_count:06d}.mp4")
    export_to_video(video_frames, video_path, fps=6)

    return video_path
|
|
|
|
|
# Start the Gradio server (blocks until the app is shut down).
interface.launch()