OmPrakashSingh1704 committed
Commit 5612db5 · 1 Parent(s): 0c13c1c
options/Video_model/Model.py CHANGED
@@ -1,19 +1,66 @@
  import torch
  from diffusers import StableVideoDiffusionPipeline
- # from diffusers.utils import load_image, export_to_video
+ from diffusers.utils import load_image  # save_video is provided by the local utils module below
  from PIL import Image
- device = "cuda" if torch.cuda.is_available() else "cpu"
- print("Using device for video:", device)
+ from tdd_svd_scheduler import TDDSVDStochasticIterativeScheduler
+ from utils import load_lora_weights, save_video
+ import os
+ import random
+ from glob import glob
+ from typing import Optional
+ import spaces

- pipeline = StableVideoDiffusionPipeline.from_pretrained(
-     "stabilityai/stable-video-diffusion-img2vid-xt-1-1", torch_dtype=torch.float16, variant="fp16"
- ).to(device)
- # pipeline.enable_model_cpu_offload()
- def Video(image):
-     image = Image.fromarray(image)
-     image = image.resize((1024, 576))
+ svd_path = 'stabilityai/stable-video-diffusion-img2vid-xt-1-1'
+ lora_repo_path = 'RED-AIGC/TDD'
+ lora_weight_name = 'svd-xt-1-1_tdd_lora_weights.safetensors'
+ max_64_bit_int = 2 ** 63 - 1  # assumed value for the seed upper bound referenced below

-     generator = torch.Generator(device=device).manual_seed(42)
-     frames = pipeline(image, decode_chunk_size=8, generator=generator).frames[0]
-     # export_to_video(frames, "generated.mp4", fps=7)
-     return frames
+ # Few-step image-to-video sampling: TDD stochastic scheduler plus distilled LoRA weights on top of SVD-XT 1.1.
+ if torch.cuda.is_available():
+     noise_scheduler = TDDSVDStochasticIterativeScheduler(num_train_timesteps=250, sigma_min=0.002, sigma_max=700.0, sigma_data=1.0,
+                                                          s_noise=1.0, rho=7, clip_denoised=False)
+     pipeline = StableVideoDiffusionPipeline.from_pretrained(svd_path, scheduler=noise_scheduler, torch_dtype=torch.float16, variant="fp16").to('cuda')
+     load_lora_weights(pipeline.unet, lora_repo_path, weight_name=lora_weight_name)
+
+ @spaces.GPU
+ def Video(
+     image: Image,
+     seed: Optional[int] = 1,
+     randomize_seed: bool = False,
+     num_inference_steps: int = 4,
+     eta: float = 0.3,
+     min_guidance_scale: float = 1.0,
+     max_guidance_scale: float = 1.0,
+     fps: int = 7,
+     width: int = 512,
+     height: int = 512,
+     num_frames: int = 25,
+     motion_bucket_id: int = 127,
+     output_folder: str = "outputs_gradio",
+ ):
+     pipeline.scheduler.set_eta(eta)
+
+     if randomize_seed:
+         seed = random.randint(0, max_64_bit_int)
+     generator = torch.manual_seed(seed)
+
+     # Write each clip to a new, sequentially numbered file in the output folder.
+     os.makedirs(output_folder, exist_ok=True)
+     base_count = len(glob(os.path.join(output_folder, "*.mp4")))
+     video_path = os.path.join(output_folder, f"{base_count:06d}.mp4")
+
+     with torch.autocast("cuda"):
+         frames = pipeline(
+             image, height=height, width=width,
+             num_inference_steps=num_inference_steps,
+             min_guidance_scale=min_guidance_scale,
+             max_guidance_scale=max_guidance_scale,
+             num_frames=num_frames, fps=fps, motion_bucket_id=motion_bucket_id,
+             decode_chunk_size=8,
+             noise_aug_strength=0.02,
+             generator=generator,
+         ).frames[0]
+     save_video(frames, video_path, fps=fps, quality=5.0)
+     torch.manual_seed(seed)
+
+     return video_path, seed
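
For reference, a minimal sketch of how this new Video() entry point might be wired into a Gradio app on the Space. Everything below is an illustrative assumption rather than part of the commit: the import path options.Video_model.Model, the gr.Interface layout, and the control labels. It relies only on what the diff above establishes, namely that Video() takes a PIL image plus keyword arguments with defaults and returns (video_path, seed).

import gradio as gr
from options.Video_model.Model import Video  # hypothetical import path; assumes the repo root is importable

# Inputs are passed to Video() positionally: image, seed, randomize_seed, num_inference_steps.
# All other parameters (fps, num_frames, guidance, ...) keep the defaults from the signature above.
demo = gr.Interface(
    fn=Video,
    inputs=[
        gr.Image(type="pil", label="Conditioning image"),
        gr.Number(value=1, precision=0, label="Seed"),
        gr.Checkbox(value=False, label="Randomize seed"),
        gr.Slider(1, 10, value=4, step=1, label="Inference steps"),
    ],
    outputs=[gr.Video(label="Generated clip"), gr.Number(label="Seed used")],
)

if __name__ == "__main__":
    demo.launch()
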
options/Video_model/__pycache__/Model.cpython-310.pyc CHANGED
Binary files a/options/Video_model/__pycache__/Model.cpython-310.pyc and b/options/Video_model/__pycache__/Model.cpython-310.pyc differ