import gradio as gr
import torch
from diffusers import AnimateDiffPipeline, DDIMScheduler, MotionAdapter
from diffusers.utils import export_to_video
import uuid
import spaces
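
# Gradio app: text-to-video with AnimateDiff, with optional camera-motion LoRAs.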
# Available adapters (replace with your actual adapter names)
adapter_options = {
    "zoom-out": "guoyww/animatediff-motion-lora-zoom-out",
    "zoom-in": "guoyww/animatediff-motion-lora-zoom-in",
    "pan-left": "guoyww/animatediff-motion-lora-pan-left",
    "pan-right": "guoyww/animatediff-motion-lora-pan-right",
    "roll-clockwise": "guoyww/animatediff-motion-lora-rolling-clockwise",
    "roll-anticlockwise": "guoyww/animatediff-motion-lora-rolling-anticlockwise",
    "tilt-up": "guoyww/animatediff-motion-lora-tilt-up",
    "tilt-down": "guoyww/animatediff-motion-lora-tilt-down",
}
def load_cached_examples():
    # Prompt / negative-prompt / guidance / steps / adapter presets
    # (currently unused; the interface below defines its own examples).
    examples = [
        ["a cat playing with a ball of yarn", "blurry", 7.5, 12, ["zoom-in"]],
        ["a dog running in a field", "dark, indoors", 8.0, 8, ["pan-left", "tilt-up"]],
    ]
    return examples
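
# Load the AnimateDiff motion adapter and a Realistic Vision (SD 1.5) base model in fp16.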
device = "cuda"
adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16)
model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter, torch_dtype=torch.float16).to(device)
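
# Pair the pipeline with a DDIM scheduler configured for AnimateDiff:
# linspace timestep spacing and a linear beta schedule.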
scheduler = DDIMScheduler.from_pretrained(
    model_id,
    subfolder="scheduler",
    clip_sample=False,
    timestep_spacing="linspace",
    beta_schedule="linear",
    steps_offset=1,
)
pipe.scheduler = scheduler
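
# spaces.GPU requests GPU time for each call when running on ZeroGPU hardware.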
@spaces.GPU
def generate_video(prompt, negative_prompt, guidance_scale, num_inference_steps, adapter_choices):
    pipe.to(device)
    # Load each motion LoRA selected by the user and activate them with equal weights.
    if adapter_choices:
        for adapter_name in adapter_choices:
            pipe.load_lora_weights(
                adapter_options[adapter_name], adapter_name=adapter_name,
            )
        pipe.set_adapters(adapter_choices, adapter_weights=[1.0] * len(adapter_choices))
        print(adapter_choices)

    output = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        num_frames=16,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
    )
    # Save the 16 generated frames as an mp4 under a collision-safe name.
    name = uuid.uuid4().hex
    path = f"/tmp/{name}.mp4"
    export_to_video(output.frames[0], path, fps=10)
    return path
iface = gr.Interface(
    theme=gr.themes.Soft(primary_hue="cyan", secondary_hue="teal"),
    fn=generate_video,
    inputs=[
        gr.Textbox(label="Prompt"),
        gr.Textbox(label="Negative Prompt"),
        gr.Slider(minimum=0.5, maximum=10, value=7.5, label="Guidance Scale"),
        gr.Slider(minimum=4, maximum=24, step=4, value=4, label="Inference Steps"),
        gr.CheckboxGroup(list(adapter_options.keys()), label="Adapter Choice", type="value"),
    ],
    outputs=gr.Video(label="Generated Video"),
    examples=[
        ["Urban ambiance, man walking, neon lights, rain, wet floor, high quality", "bad quality", 7.5, 24, []],
        ["Nature, farms, mountains in background, drone shot, high quality", "bad quality", 8.0, 24, []],
    ],
    cache_examples=True,  # run the examples once at startup and cache the outputs
)
iface.launch()