mrcuddle committed
Commit fe4e57e · verified · 1 Parent(s): d94267e

Update app.py

Files changed (1)
  app.py  +10 -4
app.py CHANGED
@@ -2,15 +2,19 @@ import torch
 from PIL import Image
 import imageio
 from diffusers import StableVideoDiffusionPipeline
+from diffusers.utils import load_image, export_to_video
 import gradio as gr
 
 # Load the pipeline
 pipe = StableVideoDiffusionPipeline.from_pretrained(
     "stabilityai/stable-video-diffusion-img2vid-xt", torch_dtype=torch.float16, variant="fp16"
 )
+pipe.to("cuda")
+pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
 pipe.enable_model_cpu_offload()
+pipe.unet.enable_forward_chunking()
 
-def generate_video(image, seed=42, fps=7):
+def generate_video(image, seed=42, fps=7, motion_bucket_id=180, noise_aug_strength=0.1):
     # Resize the image
     image = image.resize((1024, 576))
 
@@ -18,11 +22,11 @@ def generate_video(image, seed=42, fps=7):
     generator = torch.manual_seed(seed)
 
     # Generate the frames
-    frames = pipe(image, decode_chunk_size=8, generator=generator).frames[0]
+    frames = pipe(image, decode_chunk_size=2, generator=generator, num_frames=25, motion_bucket_id=motion_bucket_id, noise_aug_strength=noise_aug_strength).frames[0]
 
     # Export the frames to a video
     output_path = "generated.mp4"
-    imageio.mimwrite(output_path, frames, fps=fps)
+    export_to_video(frames, output_path, fps=fps)
 
     return output_path
 
@@ -32,7 +36,9 @@ iface = gr.Interface(
     inputs=[
         gr.Image(type="pil", label="Upload Image"),
         gr.Number(label="Seed", value=42),
-        gr.Number(label="FPS", value=7)
+        gr.Number(label="FPS", value=7),
+        gr.Number(label="Motion Bucket ID", value=180),
+        gr.Number(label="Noise Aug Strength", value=0.1)
     ],
     outputs=gr.Video(label="Generated Video"),
     title="Stable Video Diffusion",
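For reference, the generation path introduced by this commit can be exercised outside the Gradio app roughly as follows. This is a minimal sketch based on the documented diffusers Stable Video Diffusion API, assuming a CUDA-capable GPU; the input image path and seed are illustrative, not part of the commit, and only enable_model_cpu_offload() is kept here since the commit's other optimizations (torch.compile, forward chunking) are optional for a quick test.

import torch
from diffusers import StableVideoDiffusionPipeline
from diffusers.utils import load_image, export_to_video

pipe = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt", torch_dtype=torch.float16, variant="fp16"
)
pipe.enable_model_cpu_offload()  # streams submodules to the GPU on demand

# "input.jpg" is an illustrative path, not from the commit.
image = load_image("input.jpg").resize((1024, 576))
generator = torch.manual_seed(42)

# motion_bucket_id controls how much motion the video has; noise_aug_strength
# sets how much noise is added to the conditioning image (more noise, more motion).
frames = pipe(
    image,
    decode_chunk_size=2,
    num_frames=25,
    motion_bucket_id=180,
    noise_aug_strength=0.1,
    generator=generator,
).frames[0]

export_to_video(frames, "generated.mp4", fps=7)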