vilarin committed on
Commit
fec3be6
·
verified ·
1 Parent(s): d513f78

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -5
app.py CHANGED
@@ -10,16 +10,17 @@ from glob import glob
10
  from pathlib import Path
11
  from typing import Optional
12
 
13
- from diffusers import StableVideoDiffusionPipeline
14
  from diffusers.utils import load_image, export_to_video
15
 
16
  import uuid
17
- # from huggingface_hub import hf_hub_download
18
 
19
 
20
- # os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
21
- # HF_TOKEN = os.environ.get("HF_TOKEN", None)
22
  # Constants
 
23
  model = "ECNU-CILab/ExVideo-SVD-128f-v1"
24
 
25
  MAX_SEED = np.iinfo(np.int32).max
@@ -37,11 +38,28 @@ JS = """function () {
37
  }
38
  }"""
39
 
 
 
 
 
 
 
 
 
 
40
 
41
  # Ensure model and scheduler are initialized in GPU-enabled function
42
  if torch.cuda.is_available():
 
 
 
 
 
 
 
43
  pipe = StableVideoDiffusionPipeline.from_pretrained(
44
- model,
 
45
  torch_dtype=torch.float16,
46
  variant="fp16").to("cuda")
47
 
 
10
  from pathlib import Path
11
  from typing import Optional
12
 
13
+ from diffusers import StableVideoDiffusionPipeline, UNetSpatioTemporalConditionControlNetModel
14
  from diffusers.utils import load_image, export_to_video
15
 
16
  import uuid
17
+ from huggingface_hub import hf_hub_download
18
 
19
 
20
+ os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
21
+ HF_TOKEN = os.environ.get("HF_TOKEN", None)
22
  # Constants
23
+ base = "stabilityai/stable-video-diffusion-img2vid-xt"
24
  model = "ECNU-CILab/ExVideo-SVD-128f-v1"
25
 
26
  MAX_SEED = np.iinfo(np.int32).max
 
38
  }
39
  }"""
40
 
41
+ downloaded_model_path = hf_hub_download(
42
+ repo_id=model,
43
+ filename="model.fp16.safetensors",
44
+ local_dir="model"
45
+ )
46
+
47
+ MODEL_PATH = "./model/"
48
+
49
+
50
 
51
  # Ensure model and scheduler are initialized in GPU-enabled function
52
  if torch.cuda.is_available():
53
+
54
+ unet = UNetSpatioTemporalConditionControlNetModel.from_pretrained(
55
+ MODEL_PATH,
56
+ low_cpu_mem_usage=True,
57
+ variant="fp16",
58
+ )
59
+
60
  pipe = StableVideoDiffusionPipeline.from_pretrained(
61
+ base,
62
+ unet=unet,
63
  torch_dtype=torch.float16,
64
  variant="fp16").to("cuda")
65