Update README.md #1
by sayakpaul
README.md CHANGED
@@ -29,7 +29,7 @@ from diffusers import KandinskyPipeline, KandinskyPriorPipeline
 import torch
 
 
-pipe_prior = KandinskyPriorPipeline.from_pretrained("
+pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16)
 pipe_prior.to("cuda")
 
 prompt = "A alien cheeseburger creature eating itself, claymation, cinematic, moody lighting"
@@ -43,7 +43,7 @@ zero_image_emb = pipe_prior(
     negative_prompt, guidance_scale=1.0, num_inference_steps=25, generator=generator, negative_prompt=negative_prompt
 ).images
 
-pipe = KandinskyPipeline.from_pretrained("
+pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16)
 pipe.to("cuda")
 
 
@@ -81,11 +81,11 @@ original_image = Image.open(BytesIO(response.content)).convert("RGB")
 original_image = original_image.resize((768, 512))
 
 # create prior
-pipe_prior = KandinskyPriorPipeline.from_pretrained("
+pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16)
 pipe_prior.to("cuda")
 
 # create img2img pipeline
-pipe = KandinskyImg2ImgPipeline.from_pretrained("
+pipe = KandinskyImg2ImgPipeline.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16)
 pipe.to("cuda")
 
 prompt = "A fantasy landscape, Cinematic lighting"
@@ -126,7 +126,7 @@ import PIL
 import torch
 from torchvision import transforms
 
-pipe_prior = KandinskyPriorPipeline.from_pretrained("
+pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16)
 pipe_prior.to("cuda")
 
 img1 = load_image(
@@ -141,7 +141,7 @@ images_texts = ["a cat", img1, img2]
 weights = [0.3, 0.3, 0.4]
 image_emb, zero_image_emb = pipe_prior.interpolate(images_texts, weights)
 
-pipe = KandinskyPipeline.from_pretrained("
+pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16)
 pipe.to("cuda")
 
 image = pipe(
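For context, here is a minimal end-to-end sketch of the text-to-image snippet these hunks update, assembled from the lines visible above. The negative prompt, the seed, and the decoder call arguments (`image_embeds`, `negative_image_embeds`, `height`, `width`) are not part of this diff; they are assumptions based on the usual diffusers Kandinsky 2.1 API and may need adjusting for your diffusers version.

```python
import torch
from diffusers import KandinskyPipeline, KandinskyPriorPipeline

# Prior pipeline: maps text prompts to image embeddings (loaded in fp16, as in this PR).
pipe_prior = KandinskyPriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
)
pipe_prior.to("cuda")

prompt = "A alien cheeseburger creature eating itself, claymation, cinematic, moody lighting"
negative_prompt = "low quality, bad quality"  # assumed value; not shown in the diff
generator = torch.Generator(device="cuda").manual_seed(12)  # assumed seed; not shown in the diff

# Positive and negative image embeddings from the prior.
# Output attribute names follow the diffusers Kandinsky prior output and may differ in older versions.
image_emb = pipe_prior(prompt, guidance_scale=1.0, num_inference_steps=25, generator=generator).image_embeds
zero_image_emb = pipe_prior(negative_prompt, guidance_scale=1.0, num_inference_steps=25, generator=generator).image_embeds

# Decoder pipeline: turns the embeddings into an image (fp16 checkpoint, as in this PR).
pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16)
pipe.to("cuda")

image = pipe(
    prompt,
    image_embeds=image_emb,
    negative_image_embeds=zero_image_emb,
    height=768,
    width=768,
    num_inference_steps=100,
).images[0]
image.save("cheeseburger_monster.png")
```

Loading both checkpoints with `torch_dtype=torch.float16`, as the PR does, roughly halves GPU memory use compared to fp32 while keeping the rest of the snippets unchanged.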