Update pipeline.py
Browse files- pipeline.py +2 -13
pipeline.py
CHANGED
@@ -48,8 +48,7 @@ from diffusers.models import ControlNetModel
|
|
48 |
from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
|
49 |
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
|
50 |
from diffusers.utils import deprecate
|
51 |
-
|
52 |
-
from PIL import Image
|
53 |
import torchvision
|
54 |
import math
|
55 |
|
@@ -668,17 +667,7 @@ class AnimateDiffPipeline(DiffusionPipeline, TextualInversionLoaderMixin, IPAdap
|
|
668 |
)
|
669 |
|
670 |
if init_image is not None:
|
671 |
-
start_image = (
|
672 |
-
(
|
673 |
-
torchvision.transforms.functional.pil_to_tensor(
|
674 |
-
PIL.Image.open(init_image).resize((width, height))
|
675 |
-
)
|
676 |
-
/ 255
|
677 |
-
)[:3, :, :]
|
678 |
-
.to("cuda")
|
679 |
-
.to(torch.bfloat16)
|
680 |
-
.unsqueeze(0)
|
681 |
-
)
|
682 |
start_image = (
|
683 |
self.vae.encode(start_image.mul(2).sub(1))
|
684 |
.latent_dist.sample()
|
|
|
48 |
from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
|
49 |
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
|
50 |
from diffusers.utils import deprecate
|
51 |
+
|
|
|
52 |
import torchvision
|
53 |
import math
|
54 |
|
|
|
667 |
)
|
668 |
|
669 |
if init_image is not None:
|
670 |
+
start_image = ((torchvision.transforms.functional.pil_to_tensor(init_image)) / 255)[:3, :, :].to("cuda").to(torch.bfloat16).unsqueeze(0)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
671 |
start_image = (
|
672 |
self.vae.encode(start_image.mul(2).sub(1))
|
673 |
.latent_dist.sample()
|