adapters added
app.py CHANGED
@@ -21,24 +21,20 @@ adapter_options = {
 device = "cuda"
 adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16)
 model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
-
-
-
-)
-
-    "guoyww/animatediff-motion-lora-zoom-out", adapter_name="pan-left",
-)
-scheduler = DDIMScheduler.from_pretrained(
+
+@spaces.GPU
+def generate_video(prompt,negative_prompt, guidance_scale, num_inference_steps, adapter_choices):
+    pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter, torch_dtype=torch.float16).to(device)
+    scheduler = DDIMScheduler.from_pretrained(
     model_id,
     subfolder="scheduler",
     clip_sample=False,
     timestep_spacing="linspace",
     beta_schedule="linear",
     steps_offset=1,
-)
-pipe.scheduler = scheduler
-
-def generate_video(prompt, guidance_scale, num_inference_steps, adapter_choices):
+    )
+    pipe.scheduler = scheduler
+
     pipe.to(device)

     # Set adapters based on user selection

@@ -53,7 +49,7 @@ def generate_video(prompt, guidance_scale, num_inference_steps, adapter_choices)

     output = pipe(
         prompt=prompt,
-        negative_prompt=
+        negative_prompt=negative_prompt,
         num_frames=16,
         guidance_scale=guidance_scale,
         num_inference_steps=num_inference_steps,
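Read together, the two hunks above move pipeline and scheduler construction inside a @spaces.GPU-decorated generate_video and thread a negative_prompt argument through to the pipe() call. The sketch below pieces that flow together for readability; the adapter-selection code and the return step sit outside the hunks, so the export_to_gif call and the GIF file name are assumptions rather than the Space's actual code.

```python
import torch
import spaces  # Hugging Face Spaces helper providing the ZeroGPU decorator
from diffusers import AnimateDiffPipeline, DDIMScheduler, MotionAdapter
from diffusers.utils import export_to_gif

device = "cuda"
adapter = MotionAdapter.from_pretrained(
    "guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16
)
model_id = "SG161222/Realistic_Vision_V5.1_noVAE"

@spaces.GPU
def generate_video(prompt, negative_prompt, guidance_scale, num_inference_steps, adapter_choices):
    # Pipeline and scheduler are now created inside the GPU-decorated function, per the first hunk.
    pipe = AnimateDiffPipeline.from_pretrained(
        model_id, motion_adapter=adapter, torch_dtype=torch.float16
    ).to(device)
    scheduler = DDIMScheduler.from_pretrained(
        model_id,
        subfolder="scheduler",
        clip_sample=False,
        timestep_spacing="linspace",
        beta_schedule="linear",
        steps_offset=1,
    )
    pipe.scheduler = scheduler
    pipe.to(device)

    # Set adapters based on user selection (the actual selection code is outside the hunks).

    output = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        num_frames=16,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
    )

    # Assumed return path: export the first generated clip to a GIF for Gradio.
    gif_path = "animation.gif"  # hypothetical file name, not shown in the diff
    export_to_gif(output.frames[0], gif_path)
    return gif_path
```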
@@ -66,9 +62,11 @@ def generate_video(prompt, guidance_scale, num_inference_steps, adapter_choices)


 iface = gr.Interface(
+    theme=gr.themes.Monochrome(primary_hue="red", secondary_hue="pink"),
     fn=generate_video,
     inputs=[
         gr.Textbox(label="Enter your prompt"),
+        gr.Textbox(label="Negative Prompt"),
         gr.Slider(minimum=0.5, maximum=10, value=7.5, label="Guidance Scale"),
         gr.Slider(minimum=4, maximum=24, step=4, value=4, label="Inference Steps"),
         gr.CheckboxGroup(adapter_options.keys(), label="Adapter Choice",type='value'),
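The CheckboxGroup above passes a list of adapter_options keys into generate_video as adapter_choices, while the first hunk only shows the comment "# Set adapters based on user selection". A plausible version of that step, using the diffusers LoRA adapter API, is sketched below; the contents of adapter_options, the motion-LoRA repos, and the equal weights are illustrative assumptions, since none of them appear in this diff.

```python
# Hypothetical contents for adapter_options; only the dict's existence and the use
# of its keys as CheckboxGroup labels are visible in the diff.
adapter_options = {
    "Zoom out": ("guoyww/animatediff-motion-lora-zoom-out", "zoom-out"),
    "Pan left": ("guoyww/animatediff-motion-lora-pan-left", "pan-left"),
}

def apply_adapter_choices(pipe, adapter_choices):
    """Load the selected motion LoRAs and activate them on the pipeline."""
    names = []
    for choice in adapter_choices:
        repo_id, adapter_name = adapter_options[choice]
        # Each motion LoRA is loaded under its own adapter name so several can coexist.
        pipe.load_lora_weights(repo_id, adapter_name=adapter_name)
        names.append(adapter_name)
    if names:
        # Equal weights are an assumption; the app may scale adapters differently.
        pipe.set_adapters(names, adapter_weights=[1.0] * len(names))
```

Inside generate_video this would be called right after pipe.to(device), for example as apply_adapter_choices(pipe, adapter_choices).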