import torch
from diffusers import AnimateDiffPipeline, MotionAdapter, EulerDiscreteScheduler
from diffusers.utils import export_to_gif
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file
import gradio as gr
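
# Pick the GPU with fp16 when CUDA is available; otherwise fall back to CPU with fp32.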
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if torch.cuda.is_available() else torch.float32
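
# AnimateDiff-Lightning: a motion module distilled for few-step sampling,
# paired here with epiCRealism, a photorealistic SD1.5 base model.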
step = 4 # Options: [1,2,4,8]
repo = "ByteDance/AnimateDiff-Lightning"
ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
base = "emilianJR/epiCRealism"
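
# Instantiate the motion adapter and load the Lightning weights from the Hub.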
adapter = MotionAdapter().to(device, dtype)
adapter.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=device))
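
# Assemble the AnimateDiff pipeline around the base model; Lightning checkpoints
# are sampled with trailing timestep spacing and a linear beta schedule.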
pipe = AnimateDiffPipeline.from_pretrained(base, motion_adapter=adapter, torch_dtype=dtype).to(device)
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing", beta_schedule="linear")
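
# Optional sketch (not in the original app): for reproducible outputs you could
# pass a seeded torch.Generator to the pipeline call, e.g.
#   generator = torch.Generator(device=device).manual_seed(42)
#   output = pipe(prompt=prompt, generator=generator, ...)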
def animate_image(prompt, guidance_scale, num_inference_steps):
    # Gradio sliders return floats; diffusers expects an integer step count.
    output = pipe(prompt=prompt, guidance_scale=guidance_scale, num_inference_steps=int(num_inference_steps))
    gif_path = "animation.gif"
    export_to_gif(output.frames[0], gif_path)
    return gif_path

# Define the Gradio Interface
with gr.Blocks() as demo:
    gr.Markdown("# AnimateDiff API")
    with gr.Row():
        prompt = gr.Textbox(label="Prompt", placeholder="A girl smiling", value="A girl smiling")
        guidance_scale = gr.Slider(label="Guidance Scale", minimum=0.0, maximum=10.0, value=1.0, step=0.1)
        num_inference_steps = gr.Slider(label="Steps", minimum=1, maximum=8, value=step, step=1)
    gif_output = gr.Image(label="Generated Animation")
    # Button to run the pipeline
    run_button = gr.Button("Generate Animation")
    run_button.click(animate_image, inputs=[prompt, guidance_scale, num_inference_steps], outputs=[gif_output])
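
# Optional (assumption, not in the original): on a shared GPU, calling demo.queue()
# before launching serializes incoming generation requests.
# demo.queue()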
# Launch the interface
demo.launch()