Upload 10 files
- stable-diffusion/scripts/download_first_stages.sh +41 -0
- stable-diffusion/scripts/download_models.sh +49 -0
- stable-diffusion/scripts/img2img.py +293 -0
- stable-diffusion/scripts/inpaint.py +98 -0
- stable-diffusion/scripts/knn2img.py +398 -0
- stable-diffusion/scripts/latent_imagenet_diffusion.ipynb +0 -0
- stable-diffusion/scripts/sample_diffusion.py +313 -0
- stable-diffusion/scripts/tests/test_watermark.py +18 -0
- stable-diffusion/scripts/train_searcher.py +147 -0
- stable-diffusion/scripts/txt2img.py +352 -0
stable-diffusion/scripts/download_first_stages.sh
ADDED
@@ -0,0 +1,41 @@
#!/bin/bash
wget -O models/first_stage_models/kl-f4/model.zip https://ommer-lab.com/files/latent-diffusion/kl-f4.zip
wget -O models/first_stage_models/kl-f8/model.zip https://ommer-lab.com/files/latent-diffusion/kl-f8.zip
wget -O models/first_stage_models/kl-f16/model.zip https://ommer-lab.com/files/latent-diffusion/kl-f16.zip
wget -O models/first_stage_models/kl-f32/model.zip https://ommer-lab.com/files/latent-diffusion/kl-f32.zip
wget -O models/first_stage_models/vq-f4/model.zip https://ommer-lab.com/files/latent-diffusion/vq-f4.zip
wget -O models/first_stage_models/vq-f4-noattn/model.zip https://ommer-lab.com/files/latent-diffusion/vq-f4-noattn.zip
wget -O models/first_stage_models/vq-f8/model.zip https://ommer-lab.com/files/latent-diffusion/vq-f8.zip
wget -O models/first_stage_models/vq-f8-n256/model.zip https://ommer-lab.com/files/latent-diffusion/vq-f8-n256.zip
wget -O models/first_stage_models/vq-f16/model.zip https://ommer-lab.com/files/latent-diffusion/vq-f16.zip



cd models/first_stage_models/kl-f4
unzip -o model.zip

cd ../kl-f8
unzip -o model.zip

cd ../kl-f16
unzip -o model.zip

cd ../kl-f32
unzip -o model.zip

cd ../vq-f4
unzip -o model.zip

cd ../vq-f4-noattn
unzip -o model.zip

cd ../vq-f8
unzip -o model.zip

cd ../vq-f8-n256
unzip -o model.zip

cd ../vq-f16
unzip -o model.zip

cd ../..
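The nine download-and-extract pairs above are the same two commands repeated once per model. A minimal Python sketch of the equivalent data-driven loop (model names and the URL base are taken from the script itself; urllib/zipfile stand in for wget/unzip, and retry/error handling is omitted):

import os
import urllib.request
import zipfile

# First-stage model names exactly as used in the script above.
FIRST_STAGE_MODELS = ["kl-f4", "kl-f8", "kl-f16", "kl-f32",
                      "vq-f4", "vq-f4-noattn", "vq-f8", "vq-f8-n256", "vq-f16"]
BASE_URL = "https://ommer-lab.com/files/latent-diffusion"

for name in FIRST_STAGE_MODELS:
    target_dir = os.path.join("models", "first_stage_models", name)
    os.makedirs(target_dir, exist_ok=True)
    zip_path = os.path.join(target_dir, "model.zip")
    urllib.request.urlretrieve(f"{BASE_URL}/{name}.zip", zip_path)
    with zipfile.ZipFile(zip_path) as zf:
        zf.extractall(target_dir)  # mirrors `unzip -o model.zip`

The same pattern covers download_models.sh below, with a different name-to-URL table.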
stable-diffusion/scripts/download_models.sh
ADDED
@@ -0,0 +1,49 @@
#!/bin/bash
wget -O models/ldm/celeba256/celeba-256.zip https://ommer-lab.com/files/latent-diffusion/celeba.zip
wget -O models/ldm/ffhq256/ffhq-256.zip https://ommer-lab.com/files/latent-diffusion/ffhq.zip
wget -O models/ldm/lsun_churches256/lsun_churches-256.zip https://ommer-lab.com/files/latent-diffusion/lsun_churches.zip
wget -O models/ldm/lsun_beds256/lsun_beds-256.zip https://ommer-lab.com/files/latent-diffusion/lsun_bedrooms.zip
wget -O models/ldm/text2img256/model.zip https://ommer-lab.com/files/latent-diffusion/text2img.zip
wget -O models/ldm/cin256/model.zip https://ommer-lab.com/files/latent-diffusion/cin.zip
wget -O models/ldm/semantic_synthesis512/model.zip https://ommer-lab.com/files/latent-diffusion/semantic_synthesis.zip
wget -O models/ldm/semantic_synthesis256/model.zip https://ommer-lab.com/files/latent-diffusion/semantic_synthesis256.zip
wget -O models/ldm/bsr_sr/model.zip https://ommer-lab.com/files/latent-diffusion/sr_bsr.zip
wget -O models/ldm/layout2img-openimages256/model.zip https://ommer-lab.com/files/latent-diffusion/layout2img_model.zip
wget -O models/ldm/inpainting_big/model.zip https://ommer-lab.com/files/latent-diffusion/inpainting_big.zip



cd models/ldm/celeba256
unzip -o celeba-256.zip

cd ../ffhq256
unzip -o ffhq-256.zip

cd ../lsun_churches256
unzip -o lsun_churches-256.zip

cd ../lsun_beds256
unzip -o lsun_beds-256.zip

cd ../text2img256
unzip -o model.zip

cd ../cin256
unzip -o model.zip

cd ../semantic_synthesis512
unzip -o model.zip

cd ../semantic_synthesis256
unzip -o model.zip

cd ../bsr_sr
unzip -o model.zip

cd ../layout2img-openimages256
unzip -o model.zip

cd ../inpainting_big
unzip -o model.zip

cd ../..
stable-diffusion/scripts/img2img.py
ADDED
@@ -0,0 +1,293 @@
"""make variations of input image"""

import argparse, os, sys, glob
import PIL
import torch
import numpy as np
from omegaconf import OmegaConf
from PIL import Image
from tqdm import tqdm, trange
from itertools import islice
from einops import rearrange, repeat
from torchvision.utils import make_grid
from torch import autocast
from contextlib import nullcontext
import time
from pytorch_lightning import seed_everything

from ldm.util import instantiate_from_config
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.models.diffusion.plms import PLMSSampler


def chunk(it, size):
    it = iter(it)
    return iter(lambda: tuple(islice(it, size)), ())


def load_model_from_config(config, ckpt, verbose=False):
    print(f"Loading model from {ckpt}")
    pl_sd = torch.load(ckpt, map_location="cpu")
    if "global_step" in pl_sd:
        print(f"Global Step: {pl_sd['global_step']}")
    sd = pl_sd["state_dict"]
    model = instantiate_from_config(config.model)
    m, u = model.load_state_dict(sd, strict=False)
    if len(m) > 0 and verbose:
        print("missing keys:")
        print(m)
    if len(u) > 0 and verbose:
        print("unexpected keys:")
        print(u)

    model.cuda()
    model.eval()
    return model


def load_img(path):
    image = Image.open(path).convert("RGB")
    w, h = image.size
    print(f"loaded input image of size ({w}, {h}) from {path}")
    w, h = map(lambda x: x - x % 32, (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL.Image.LANCZOS)
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.*image - 1.


def main():
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--prompt",
        type=str,
        nargs="?",
        default="a painting of a virus monster playing guitar",
        help="the prompt to render"
    )

    parser.add_argument(
        "--init-img",
        type=str,
        nargs="?",
        help="path to the input image"
    )

    parser.add_argument(
        "--outdir",
        type=str,
        nargs="?",
        help="dir to write results to",
        default="outputs/img2img-samples"
    )

    parser.add_argument(
        "--skip_grid",
        action='store_true',
        help="do not save a grid, only individual samples. Helpful when evaluating lots of samples",
    )

    parser.add_argument(
        "--skip_save",
        action='store_true',
        help="do not save individual samples. For speed measurements.",
    )

    parser.add_argument(
        "--ddim_steps",
        type=int,
        default=50,
        help="number of ddim sampling steps",
    )

    parser.add_argument(
        "--plms",
        action='store_true',
        help="use plms sampling",
    )
    parser.add_argument(
        "--fixed_code",
        action='store_true',
        help="if enabled, uses the same starting code across all samples",
    )

    parser.add_argument(
        "--ddim_eta",
        type=float,
        default=0.0,
        help="ddim eta (eta=0.0 corresponds to deterministic sampling)",
    )
    parser.add_argument(
        "--n_iter",
        type=int,
        default=1,
        help="sample this often",
    )
    parser.add_argument(
        "--C",
        type=int,
        default=4,
        help="latent channels",
    )
    parser.add_argument(
        "--f",
        type=int,
        default=8,
        help="downsampling factor, most often 8 or 16",
    )
    parser.add_argument(
        "--n_samples",
        type=int,
        default=2,
        help="how many samples to produce for each given prompt, a.k.a. batch size",
    )
    parser.add_argument(
        "--n_rows",
        type=int,
        default=0,
        help="rows in the grid (default: n_samples)",
    )
    parser.add_argument(
        "--scale",
        type=float,
        default=5.0,
        help="unconditional guidance scale: eps = eps(x, empty) + scale * (eps(x, cond) - eps(x, empty))",
    )

    parser.add_argument(
        "--strength",
        type=float,
        default=0.75,
        help="strength for noising/unnoising. 1.0 corresponds to full destruction of information in init image",
    )
    parser.add_argument(
        "--from-file",
        type=str,
        help="if specified, load prompts from this file",
    )
    parser.add_argument(
        "--config",
        type=str,
        default="configs/stable-diffusion/v1-inference.yaml",
        help="path to config which constructs model",
    )
    parser.add_argument(
        "--ckpt",
        type=str,
        default="models/ldm/stable-diffusion-v1/model.ckpt",
        help="path to checkpoint of model",
    )
    parser.add_argument(
        "--seed",
        type=int,
        default=42,
        help="the seed (for reproducible sampling)",
    )
    parser.add_argument(
        "--precision",
        type=str,
        help="evaluate at this precision",
        choices=["full", "autocast"],
        default="autocast"
    )

    opt = parser.parse_args()
    seed_everything(opt.seed)

    config = OmegaConf.load(f"{opt.config}")
    model = load_model_from_config(config, f"{opt.ckpt}")

    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    model = model.to(device)

    if opt.plms:
        raise NotImplementedError("PLMS sampler not (yet) supported")
        sampler = PLMSSampler(model)
    else:
        sampler = DDIMSampler(model)

    os.makedirs(opt.outdir, exist_ok=True)
    outpath = opt.outdir

    batch_size = opt.n_samples
    n_rows = opt.n_rows if opt.n_rows > 0 else batch_size
    if not opt.from_file:
        prompt = opt.prompt
        assert prompt is not None
        data = [batch_size * [prompt]]

    else:
        print(f"reading prompts from {opt.from_file}")
        with open(opt.from_file, "r") as f:
            data = f.read().splitlines()
            data = list(chunk(data, batch_size))

    sample_path = os.path.join(outpath, "samples")
    os.makedirs(sample_path, exist_ok=True)
    base_count = len(os.listdir(sample_path))
    grid_count = len(os.listdir(outpath)) - 1

    assert os.path.isfile(opt.init_img)
    init_image = load_img(opt.init_img).to(device)
    init_image = repeat(init_image, '1 ... -> b ...', b=batch_size)
    init_latent = model.get_first_stage_encoding(model.encode_first_stage(init_image))  # move to latent space

    sampler.make_schedule(ddim_num_steps=opt.ddim_steps, ddim_eta=opt.ddim_eta, verbose=False)

    assert 0. <= opt.strength <= 1., 'can only work with strength in [0.0, 1.0]'
    t_enc = int(opt.strength * opt.ddim_steps)
    print(f"target t_enc is {t_enc} steps")

    precision_scope = autocast if opt.precision == "autocast" else nullcontext
    with torch.no_grad():
        with precision_scope("cuda"):
            with model.ema_scope():
                tic = time.time()
                all_samples = list()
                for n in trange(opt.n_iter, desc="Sampling"):
                    for prompts in tqdm(data, desc="data"):
                        uc = None
                        if opt.scale != 1.0:
                            uc = model.get_learned_conditioning(batch_size * [""])
                        if isinstance(prompts, tuple):
                            prompts = list(prompts)
                        c = model.get_learned_conditioning(prompts)

                        # encode (scaled latent)
                        z_enc = sampler.stochastic_encode(init_latent, torch.tensor([t_enc]*batch_size).to(device))
                        # decode it
                        samples = sampler.decode(z_enc, c, t_enc, unconditional_guidance_scale=opt.scale,
                                                 unconditional_conditioning=uc,)

                        x_samples = model.decode_first_stage(samples)
                        x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0)

                        if not opt.skip_save:
                            for x_sample in x_samples:
                                x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c')
                                Image.fromarray(x_sample.astype(np.uint8)).save(
                                    os.path.join(sample_path, f"{base_count:05}.png"))
                                base_count += 1
                        all_samples.append(x_samples)

                if not opt.skip_grid:
                    # additionally, save as grid
                    grid = torch.stack(all_samples, 0)
                    grid = rearrange(grid, 'n b c h w -> (n b) c h w')
                    grid = make_grid(grid, nrow=n_rows)

                    # to image
                    grid = 255. * rearrange(grid, 'c h w -> h w c').cpu().numpy()
                    Image.fromarray(grid.astype(np.uint8)).save(os.path.join(outpath, f'grid-{grid_count:04}.png'))
                    grid_count += 1

                toc = time.time()

    print(f"Your samples are ready and waiting for you here: \n{outpath} \n"
          f" \nEnjoy.")


if __name__ == "__main__":
    main()
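The heart of img2img.py is SDEdit-style partial noising: --strength decides how far along the DDIM schedule the init latent is pushed before being denoised back, via t_enc = int(strength * ddim_steps) feeding sampler.stochastic_encode and sampler.decode. A tiny self-contained sketch of just that mapping (step counts are illustrative):

def t_enc_for(strength: float, ddim_steps: int = 50) -> int:
    # Same formula as in main() above; strength 1.0 destroys all
    # information from the init image, lower values keep more structure.
    assert 0.0 <= strength <= 1.0, 'can only work with strength in [0.0, 1.0]'
    return int(strength * ddim_steps)

for s in (0.25, 0.5, 0.75, 1.0):
    print(f"strength={s:.2f} -> denoise for {t_enc_for(s)} of 50 DDIM steps")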
stable-diffusion/scripts/inpaint.py
ADDED
@@ -0,0 +1,98 @@
import argparse, os, sys, glob
from omegaconf import OmegaConf
from PIL import Image
from tqdm import tqdm
import numpy as np
import torch
from main import instantiate_from_config
from ldm.models.diffusion.ddim import DDIMSampler


def make_batch(image, mask, device):
    image = np.array(Image.open(image).convert("RGB"))
    image = image.astype(np.float32)/255.0
    image = image[None].transpose(0,3,1,2)
    image = torch.from_numpy(image)

    mask = np.array(Image.open(mask).convert("L"))
    mask = mask.astype(np.float32)/255.0
    mask = mask[None,None]
    mask[mask < 0.5] = 0
    mask[mask >= 0.5] = 1
    mask = torch.from_numpy(mask)

    masked_image = (1-mask)*image

    batch = {"image": image, "mask": mask, "masked_image": masked_image}
    for k in batch:
        batch[k] = batch[k].to(device=device)
        batch[k] = batch[k]*2.0-1.0
    return batch


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--indir",
        type=str,
        nargs="?",
        help="dir containing image-mask pairs (`example.png` and `example_mask.png`)",
    )
    parser.add_argument(
        "--outdir",
        type=str,
        nargs="?",
        help="dir to write results to",
    )
    parser.add_argument(
        "--steps",
        type=int,
        default=50,
        help="number of ddim sampling steps",
    )
    opt = parser.parse_args()

    masks = sorted(glob.glob(os.path.join(opt.indir, "*_mask.png")))
    images = [x.replace("_mask.png", ".png") for x in masks]
    print(f"Found {len(masks)} inputs.")

    config = OmegaConf.load("models/ldm/inpainting_big/config.yaml")
    model = instantiate_from_config(config.model)
    model.load_state_dict(torch.load("models/ldm/inpainting_big/last.ckpt")["state_dict"],
                          strict=False)

    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    model = model.to(device)
    sampler = DDIMSampler(model)

    os.makedirs(opt.outdir, exist_ok=True)
    with torch.no_grad():
        with model.ema_scope():
            for image, mask in tqdm(zip(images, masks)):
                outpath = os.path.join(opt.outdir, os.path.split(image)[1])
                batch = make_batch(image, mask, device=device)

                # encode masked image and concat downsampled mask
                c = model.cond_stage_model.encode(batch["masked_image"])
                cc = torch.nn.functional.interpolate(batch["mask"],
                                                     size=c.shape[-2:])
                c = torch.cat((c, cc), dim=1)

                shape = (c.shape[1]-1,)+c.shape[2:]
                samples_ddim, _ = sampler.sample(S=opt.steps,
                                                 conditioning=c,
                                                 batch_size=c.shape[0],
                                                 shape=shape,
                                                 verbose=False)
                x_samples_ddim = model.decode_first_stage(samples_ddim)

                image = torch.clamp((batch["image"]+1.0)/2.0,
                                    min=0.0, max=1.0)
                mask = torch.clamp((batch["mask"]+1.0)/2.0,
                                   min=0.0, max=1.0)
                predicted_image = torch.clamp((x_samples_ddim+1.0)/2.0,
                                              min=0.0, max=1.0)

                inpainted = (1-mask)*image+mask*predicted_image
                inpainted = inpainted.cpu().numpy().transpose(0,2,3,1)[0]*255
                Image.fromarray(inpainted.astype(np.uint8)).save(outpath)
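The conditioning built in the loop above is the first-stage encoding of the masked image with the binary mask appended as one extra channel, downsampled to the latent resolution; sampling then uses a shape with that channel dropped again. A shape-only sketch with random tensors (the 512-to-128 latent size is an assumption for illustration; the real sizes come from the inpainting model's first stage):

import torch
import torch.nn.functional as F

masked_latent = torch.randn(1, 3, 128, 128)  # stand-in for cond_stage_model.encode(...)
mask = torch.rand(1, 1, 512, 512)            # mask at image resolution

cc = F.interpolate(mask, size=masked_latent.shape[-2:])  # downsample mask to latent size
c = torch.cat((masked_latent, cc), dim=1)                # append mask as a channel

print(tuple(c.shape))                          # (1, 4, 128, 128)
print((c.shape[1] - 1,) + tuple(c.shape[2:]))  # sampling shape drops the mask channel: (3, 128, 128)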
stable-diffusion/scripts/knn2img.py
ADDED
@@ -0,0 +1,398 @@
import argparse, os, sys, glob
import clip
import torch
import torch.nn as nn
import numpy as np
from omegaconf import OmegaConf
from PIL import Image
from tqdm import tqdm, trange
from itertools import islice
from einops import rearrange, repeat
from torchvision.utils import make_grid
import scann
import time
from multiprocessing import cpu_count

from ldm.util import instantiate_from_config, parallel_data_prefetch
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.models.diffusion.plms import PLMSSampler
from ldm.modules.encoders.modules import FrozenClipImageEmbedder, FrozenCLIPTextEmbedder

DATABASES = [
    "openimages",
    "artbench-art_nouveau",
    "artbench-baroque",
    "artbench-expressionism",
    "artbench-impressionism",
    "artbench-post_impressionism",
    "artbench-realism",
    "artbench-romanticism",
    "artbench-renaissance",
    "artbench-surrealism",
    "artbench-ukiyo_e",
]


def chunk(it, size):
    it = iter(it)
    return iter(lambda: tuple(islice(it, size)), ())


def load_model_from_config(config, ckpt, verbose=False):
    print(f"Loading model from {ckpt}")
    pl_sd = torch.load(ckpt, map_location="cpu")
    if "global_step" in pl_sd:
        print(f"Global Step: {pl_sd['global_step']}")
    sd = pl_sd["state_dict"]
    model = instantiate_from_config(config.model)
    m, u = model.load_state_dict(sd, strict=False)
    if len(m) > 0 and verbose:
        print("missing keys:")
        print(m)
    if len(u) > 0 and verbose:
        print("unexpected keys:")
        print(u)

    model.cuda()
    model.eval()
    return model


class Searcher(object):
    def __init__(self, database, retriever_version='ViT-L/14'):
        assert database in DATABASES
        # self.database = self.load_database(database)
        self.database_name = database
        self.searcher_savedir = f'data/rdm/searchers/{self.database_name}'
        self.database_path = f'data/rdm/retrieval_databases/{self.database_name}'
        self.retriever = self.load_retriever(version=retriever_version)
        self.database = {'embedding': [],
                         'img_id': [],
                         'patch_coords': []}
        self.load_database()
        self.load_searcher()

    def train_searcher(self, k,
                       metric='dot_product',
                       searcher_savedir=None):

        print('Start training searcher')
        searcher = scann.scann_ops_pybind.builder(self.database['embedding'] /
                                                  np.linalg.norm(self.database['embedding'], axis=1)[:, np.newaxis],
                                                  k, metric)
        self.searcher = searcher.score_brute_force().build()
        print('Finish training searcher')

        if searcher_savedir is not None:
            print(f'Save trained searcher under "{searcher_savedir}"')
            os.makedirs(searcher_savedir, exist_ok=True)
            self.searcher.serialize(searcher_savedir)

    def load_single_file(self, saved_embeddings):
        compressed = np.load(saved_embeddings)
        self.database = {key: compressed[key] for key in compressed.files}
        print('Finished loading of clip embeddings.')

    def load_multi_files(self, data_archive):
        out_data = {key: [] for key in self.database}
        for d in tqdm(data_archive, desc=f'Loading datapool from {len(data_archive)} individual files.'):
            for key in d.files:
                out_data[key].append(d[key])

        return out_data

    def load_database(self):

        print(f'Load saved patch embedding from "{self.database_path}"')
        file_content = glob.glob(os.path.join(self.database_path, '*.npz'))

        if len(file_content) == 1:
            self.load_single_file(file_content[0])
        elif len(file_content) > 1:
            data = [np.load(f) for f in file_content]
            prefetched_data = parallel_data_prefetch(self.load_multi_files, data,
                                                     n_proc=min(len(data), cpu_count()), target_data_type='dict')

            self.database = {key: np.concatenate([od[key] for od in prefetched_data], axis=1)[0] for key in
                             self.database}
        else:
            raise ValueError(f'No npz-files in specified path "{self.database_path}"; does this directory exist?')

        print(f'Finished loading of retrieval database of length {self.database["embedding"].shape[0]}.')

    def load_retriever(self, version='ViT-L/14', ):
        model = FrozenClipImageEmbedder(model=version)
        if torch.cuda.is_available():
            model.cuda()
        model.eval()
        return model

    def load_searcher(self):
        print(f'load searcher for database {self.database_name} from {self.searcher_savedir}')
        self.searcher = scann.scann_ops_pybind.load_searcher(self.searcher_savedir)
        print('Finished loading searcher.')

    def search(self, x, k):
        if self.searcher is None and self.database['embedding'].shape[0] < 2e4:
            self.train_searcher(k)   # quickly fit searcher on the fly for small databases
        assert self.searcher is not None, 'Cannot search with uninitialized searcher'
        if isinstance(x, torch.Tensor):
            x = x.detach().cpu().numpy()
        if len(x.shape) == 3:
            x = x[:, 0]
        query_embeddings = x / np.linalg.norm(x, axis=1)[:, np.newaxis]

        start = time.time()
        nns, distances = self.searcher.search_batched(query_embeddings, final_num_neighbors=k)
        end = time.time()

        out_embeddings = self.database['embedding'][nns]
        out_img_ids = self.database['img_id'][nns]
        out_pc = self.database['patch_coords'][nns]

        out = {'nn_embeddings': out_embeddings / np.linalg.norm(out_embeddings, axis=-1)[..., np.newaxis],
               'img_ids': out_img_ids,
               'patch_coords': out_pc,
               'queries': x,
               'exec_time': end - start,
               'nns': nns,
               'q_embeddings': query_embeddings}

        return out

    def __call__(self, x, n):
        return self.search(x, n)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # TODO: add n_neighbors and modes (text-only, text-image-retrieval, image-image retrieval etc)
    # TODO: add 'image variation' mode when knn=0 but a single image is given instead of a text prompt?
    parser.add_argument(
        "--prompt",
        type=str,
        nargs="?",
        default="a painting of a virus monster playing guitar",
        help="the prompt to render"
    )

    parser.add_argument(
        "--outdir",
        type=str,
        nargs="?",
        help="dir to write results to",
        default="outputs/txt2img-samples"
    )

    parser.add_argument(
        "--skip_grid",
        action='store_true',
        help="do not save a grid, only individual samples. Helpful when evaluating lots of samples",
    )

    parser.add_argument(
        "--ddim_steps",
        type=int,
        default=50,
        help="number of ddim sampling steps",
    )

    parser.add_argument(
        "--n_repeat",
        type=int,
        default=1,
        help="number of repeats in CLIP latent space",
    )

    parser.add_argument(
        "--plms",
        action='store_true',
        help="use plms sampling",
    )

    parser.add_argument(
        "--ddim_eta",
        type=float,
        default=0.0,
        help="ddim eta (eta=0.0 corresponds to deterministic sampling)",
    )
    parser.add_argument(
        "--n_iter",
        type=int,
        default=1,
        help="sample this often",
    )

    parser.add_argument(
        "--H",
        type=int,
        default=768,
        help="image height, in pixel space",
    )

    parser.add_argument(
        "--W",
        type=int,
        default=768,
        help="image width, in pixel space",
    )

    parser.add_argument(
        "--n_samples",
        type=int,
        default=3,
        help="how many samples to produce for each given prompt, a.k.a. batch size",
    )

    parser.add_argument(
        "--n_rows",
        type=int,
        default=0,
        help="rows in the grid (default: n_samples)",
    )

    parser.add_argument(
        "--scale",
        type=float,
        default=5.0,
        help="unconditional guidance scale: eps = eps(x, empty) + scale * (eps(x, cond) - eps(x, empty))",
    )

    parser.add_argument(
        "--from-file",
        type=str,
        help="if specified, load prompts from this file",
    )

    parser.add_argument(
        "--config",
        type=str,
        default="configs/retrieval-augmented-diffusion/768x768.yaml",
        help="path to config which constructs model",
    )

    parser.add_argument(
        "--ckpt",
        type=str,
        default="models/rdm/rdm768x768/model.ckpt",
        help="path to checkpoint of model",
    )

    parser.add_argument(
        "--clip_type",
        type=str,
        default="ViT-L/14",
        help="which CLIP model to use for retrieval and NN encoding",
    )
    parser.add_argument(
        "--database",
        type=str,
        default='artbench-surrealism',
        choices=DATABASES,
        help="The database used for the search, only applied when --use_neighbors=True",
    )
    parser.add_argument(
        "--use_neighbors",
        default=False,
        action='store_true',
        help="Include neighbors in addition to text prompt for conditioning",
    )
    parser.add_argument(
        "--knn",
        default=10,
        type=int,
        help="The number of included neighbors, only applied when --use_neighbors=True",
    )

    opt = parser.parse_args()

    config = OmegaConf.load(f"{opt.config}")
    model = load_model_from_config(config, f"{opt.ckpt}")

    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    model = model.to(device)

    clip_text_encoder = FrozenCLIPTextEmbedder(opt.clip_type).to(device)

    if opt.plms:
        sampler = PLMSSampler(model)
    else:
        sampler = DDIMSampler(model)

    os.makedirs(opt.outdir, exist_ok=True)
    outpath = opt.outdir

    batch_size = opt.n_samples
    n_rows = opt.n_rows if opt.n_rows > 0 else batch_size
    if not opt.from_file:
        prompt = opt.prompt
        assert prompt is not None
        data = [batch_size * [prompt]]

    else:
        print(f"reading prompts from {opt.from_file}")
        with open(opt.from_file, "r") as f:
            data = f.read().splitlines()
            data = list(chunk(data, batch_size))

    sample_path = os.path.join(outpath, "samples")
    os.makedirs(sample_path, exist_ok=True)
    base_count = len(os.listdir(sample_path))
    grid_count = len(os.listdir(outpath)) - 1

    print(f"sampling scale for cfg is {opt.scale:.2f}")

    searcher = None
    if opt.use_neighbors:
        searcher = Searcher(opt.database)

    with torch.no_grad():
        with model.ema_scope():
            for n in trange(opt.n_iter, desc="Sampling"):
                all_samples = list()
                for prompts in tqdm(data, desc="data"):
                    print("sampling prompts:", prompts)
                    if isinstance(prompts, tuple):
                        prompts = list(prompts)
                    c = clip_text_encoder.encode(prompts)
                    uc = None
                    if searcher is not None:
                        nn_dict = searcher(c, opt.knn)
                        c = torch.cat([c, torch.from_numpy(nn_dict['nn_embeddings']).cuda()], dim=1)
                    if opt.scale != 1.0:
                        uc = torch.zeros_like(c)
                    if isinstance(prompts, tuple):
                        prompts = list(prompts)
                    shape = [16, opt.H // 16, opt.W // 16]  # note: currently hardcoded for f16 model
                    samples_ddim, _ = sampler.sample(S=opt.ddim_steps,
                                                     conditioning=c,
                                                     batch_size=c.shape[0],
                                                     shape=shape,
                                                     verbose=False,
                                                     unconditional_guidance_scale=opt.scale,
                                                     unconditional_conditioning=uc,
                                                     eta=opt.ddim_eta,
                                                     )

                    x_samples_ddim = model.decode_first_stage(samples_ddim)
                    x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)

                    for x_sample in x_samples_ddim:
                        x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c')
                        Image.fromarray(x_sample.astype(np.uint8)).save(
                            os.path.join(sample_path, f"{base_count:05}.png"))
                        base_count += 1
                    all_samples.append(x_samples_ddim)

                if not opt.skip_grid:
                    # additionally, save as grid
                    grid = torch.stack(all_samples, 0)
                    grid = rearrange(grid, 'n b c h w -> (n b) c h w')
                    grid = make_grid(grid, nrow=n_rows)

                    # to image
                    grid = 255. * rearrange(grid, 'c h w -> h w c').cpu().numpy()
                    Image.fromarray(grid.astype(np.uint8)).save(os.path.join(outpath, f'grid-{grid_count:04}.png'))
                    grid_count += 1

    print(f"Your samples are ready and waiting for you here: \n{outpath} \nEnjoy.")
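Searcher.search normalizes both the database and the queries to unit length, so ScaNN's dot_product metric amounts to cosine similarity. A brute-force numpy equivalent of that lookup, with no scann dependency (array sizes are illustrative; CLIP ViT-L/14 embeddings are 768-dimensional):

import numpy as np

rng = np.random.default_rng(0)
db = rng.normal(size=(1000, 768)).astype(np.float32)  # database embeddings
q = rng.normal(size=(2, 768)).astype(np.float32)      # query embeddings
k = 4

db_n = db / np.linalg.norm(db, axis=1)[:, np.newaxis]  # same normalization as the script
q_n = q / np.linalg.norm(q, axis=1)[:, np.newaxis]

scores = q_n @ db_n.T                     # cosine similarity per query/database pair
nns = np.argsort(-scores, axis=1)[:, :k]  # indices of the k nearest neighbors
print(nns.shape)                          # (2, 4)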
stable-diffusion/scripts/latent_imagenet_diffusion.ipynb
ADDED
The diff for this file is too large to render.
stable-diffusion/scripts/sample_diffusion.py
ADDED
@@ -0,0 +1,313 @@
import argparse, os, sys, glob, datetime, yaml
import torch
import time
import numpy as np
from tqdm import trange

from omegaconf import OmegaConf
from PIL import Image

from ldm.models.diffusion.ddim import DDIMSampler
from ldm.util import instantiate_from_config

rescale = lambda x: (x + 1.) / 2.

def custom_to_pil(x):
    x = x.detach().cpu()
    x = torch.clamp(x, -1., 1.)
    x = (x + 1.) / 2.
    x = x.permute(1, 2, 0).numpy()
    x = (255 * x).astype(np.uint8)
    x = Image.fromarray(x)
    if not x.mode == "RGB":
        x = x.convert("RGB")
    return x


def custom_to_np(x):
    # saves the batch in adm style as in https://github.com/openai/guided-diffusion/blob/main/scripts/image_sample.py
    sample = x.detach().cpu()
    sample = ((sample + 1) * 127.5).clamp(0, 255).to(torch.uint8)
    sample = sample.permute(0, 2, 3, 1)
    sample = sample.contiguous()
    return sample


def logs2pil(logs, keys=["sample"]):
    imgs = dict()
    for k in logs:
        try:
            if len(logs[k].shape) == 4:
                img = custom_to_pil(logs[k][0, ...])
            elif len(logs[k].shape) == 3:
                img = custom_to_pil(logs[k])
            else:
                print(f"Unknown format for key {k}. ")
                img = None
        except:
            img = None
        imgs[k] = img
    return imgs


@torch.no_grad()
def convsample(model, shape, return_intermediates=True,
               verbose=True,
               make_prog_row=False):

    if not make_prog_row:
        return model.p_sample_loop(None, shape,
                                   return_intermediates=return_intermediates, verbose=verbose)
    else:
        return model.progressive_denoising(
            None, shape, verbose=True
        )


@torch.no_grad()
def convsample_ddim(model, steps, shape, eta=1.0
                    ):
    ddim = DDIMSampler(model)
    bs = shape[0]
    shape = shape[1:]
    samples, intermediates = ddim.sample(steps, batch_size=bs, shape=shape, eta=eta, verbose=False,)
    return samples, intermediates


@torch.no_grad()
def make_convolutional_sample(model, batch_size, vanilla=False, custom_steps=None, eta=1.0,):

    log = dict()

    shape = [batch_size,
             model.model.diffusion_model.in_channels,
             model.model.diffusion_model.image_size,
             model.model.diffusion_model.image_size]

    with model.ema_scope("Plotting"):
        t0 = time.time()
        if vanilla:
            sample, progrow = convsample(model, shape,
                                         make_prog_row=True)
        else:
            sample, intermediates = convsample_ddim(model, steps=custom_steps, shape=shape,
                                                    eta=eta)

        t1 = time.time()

    x_sample = model.decode_first_stage(sample)

    log["sample"] = x_sample
    log["time"] = t1 - t0
    log['throughput'] = sample.shape[0] / (t1 - t0)
    print(f'Throughput for this batch: {log["throughput"]}')
    return log

def run(model, logdir, batch_size=50, vanilla=False, custom_steps=None, eta=None, n_samples=50000, nplog=None):
    if vanilla:
        print(f'Using Vanilla DDPM sampling with {model.num_timesteps} sampling steps.')
    else:
        print(f'Using DDIM sampling with {custom_steps} sampling steps and eta={eta}')


    tstart = time.time()
    n_saved = len(glob.glob(os.path.join(logdir,'*.png')))-1
    # path = logdir
    if model.cond_stage_model is None:
        all_images = []

        print(f"Running unconditional sampling for {n_samples} samples")
        for _ in trange(n_samples // batch_size, desc="Sampling Batches (unconditional)"):
            logs = make_convolutional_sample(model, batch_size=batch_size,
                                             vanilla=vanilla, custom_steps=custom_steps,
                                             eta=eta)
            n_saved = save_logs(logs, logdir, n_saved=n_saved, key="sample")
            all_images.extend([custom_to_np(logs["sample"])])
            if n_saved >= n_samples:
                print(f'Finish after generating {n_saved} samples')
                break
        all_img = np.concatenate(all_images, axis=0)
        all_img = all_img[:n_samples]
        shape_str = "x".join([str(x) for x in all_img.shape])
        nppath = os.path.join(nplog, f"{shape_str}-samples.npz")
        np.savez(nppath, all_img)

    else:
        raise NotImplementedError('Currently only sampling for unconditional models supported.')

    print(f"sampling of {n_saved} images finished in {(time.time() - tstart) / 60.:.2f} minutes.")


def save_logs(logs, path, n_saved=0, key="sample", np_path=None):
    for k in logs:
        if k == key:
            batch = logs[key]
            if np_path is None:
                for x in batch:
                    img = custom_to_pil(x)
                    imgpath = os.path.join(path, f"{key}_{n_saved:06}.png")
                    img.save(imgpath)
                    n_saved += 1
            else:
                npbatch = custom_to_np(batch)
                shape_str = "x".join([str(x) for x in npbatch.shape])
                nppath = os.path.join(np_path, f"{n_saved}-{shape_str}-samples.npz")
                np.savez(nppath, npbatch)
                n_saved += npbatch.shape[0]
    return n_saved


def get_parser():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-r",
        "--resume",
        type=str,
        nargs="?",
        help="load from logdir or checkpoint in logdir",
    )
    parser.add_argument(
        "-n",
        "--n_samples",
        type=int,
        nargs="?",
        help="number of samples to draw",
        default=50000
    )
    parser.add_argument(
        "-e",
        "--eta",
        type=float,
        nargs="?",
        help="eta for ddim sampling (0.0 yields deterministic sampling)",
        default=1.0
    )
    parser.add_argument(
        "-v",
        "--vanilla_sample",
        default=False,
        action='store_true',
        help="vanilla sampling (default option is DDIM sampling)?",
    )
    parser.add_argument(
        "-l",
        "--logdir",
        type=str,
        nargs="?",
        help="extra logdir",
        default="none"
    )
    parser.add_argument(
        "-c",
        "--custom_steps",
        type=int,
        nargs="?",
        help="number of steps for ddim and fastdpm sampling",
        default=50
    )
    parser.add_argument(
        "--batch_size",
        type=int,
        nargs="?",
        help="the batch size",
        default=10
    )
    return parser


def load_model_from_config(config, sd):
    model = instantiate_from_config(config)
    model.load_state_dict(sd,strict=False)
    model.cuda()
    model.eval()
    return model


def load_model(config, ckpt, gpu, eval_mode):
    if ckpt:
        print(f"Loading model from {ckpt}")
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model,
                                   pl_sd["state_dict"])

    return model, global_step


if __name__ == "__main__":
    now = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    sys.path.append(os.getcwd())
    command = " ".join(sys.argv)

    parser = get_parser()
    opt, unknown = parser.parse_known_args()
    ckpt = None

    if not os.path.exists(opt.resume):
        raise ValueError("Cannot find {}".format(opt.resume))
    if os.path.isfile(opt.resume):
        # paths = opt.resume.split("/")
        try:
            logdir = '/'.join(opt.resume.split('/')[:-1])
            # idx = len(paths)-paths[::-1].index("logs")+1
            print(f'Logdir is {logdir}')
        except ValueError:
            paths = opt.resume.split("/")
            idx = -2  # take a guess: path/to/logdir/checkpoints/model.ckpt
            logdir = "/".join(paths[:idx])
        ckpt = opt.resume
    else:
        assert os.path.isdir(opt.resume), f"{opt.resume} is not a directory"
        logdir = opt.resume.rstrip("/")
        ckpt = os.path.join(logdir, "model.ckpt")

    base_configs = sorted(glob.glob(os.path.join(logdir, "config.yaml")))
    opt.base = base_configs

    configs = [OmegaConf.load(cfg) for cfg in opt.base]
    cli = OmegaConf.from_dotlist(unknown)
    config = OmegaConf.merge(*configs, cli)

    gpu = True
    eval_mode = True

    if opt.logdir != "none":
        locallog = logdir.split(os.sep)[-1]
        if locallog == "": locallog = logdir.split(os.sep)[-2]
        print(f"Switching logdir from '{logdir}' to '{os.path.join(opt.logdir, locallog)}'")
        logdir = os.path.join(opt.logdir, locallog)

    print(config)

    model, global_step = load_model(config, ckpt, gpu, eval_mode)
    print(f"global step: {global_step}")
    print(75 * "=")
    print("logging to:")
    logdir = os.path.join(logdir, "samples", f"{global_step:08}", now)
    imglogdir = os.path.join(logdir, "img")
    numpylogdir = os.path.join(logdir, "numpy")

    os.makedirs(imglogdir)
    os.makedirs(numpylogdir)
    print(logdir)
    print(75 * "=")

    # write config out
    sampling_file = os.path.join(logdir, "sampling_config.yaml")
    sampling_conf = vars(opt)

    with open(sampling_file, 'w') as f:
        yaml.dump(sampling_conf, f, default_flow_style=False)
        print(sampling_conf)


    run(model, imglogdir, eta=opt.eta,
        vanilla=opt.vanilla_sample, n_samples=opt.n_samples, custom_steps=opt.custom_steps,
        batch_size=opt.batch_size, nplog=numpylogdir)

    print("done.")
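custom_to_np above maps model outputs from [-1, 1] to uint8 via (x + 1) * 127.5 with clamping; a quick endpoint check on a toy tensor confirms the mapping:

import torch

x = torch.tensor([[[[-1.0, 0.0, 1.0]]]])  # minimal NCHW batch with values in [-1, 1]
sample = ((x + 1) * 127.5).clamp(0, 255).to(torch.uint8)
print(sample.flatten().tolist())  # [0, 127, 255]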
stable-diffusion/scripts/tests/test_watermark.py
ADDED
@@ -0,0 +1,18 @@
import cv2
import fire
from imwatermark import WatermarkDecoder


def testit(img_path):
    bgr = cv2.imread(img_path)
    decoder = WatermarkDecoder('bytes', 136)
    watermark = decoder.decode(bgr, 'dwtDct')
    try:
        dec = watermark.decode('utf-8')
    except:
        dec = "null"
    print(dec)


if __name__ == "__main__":
    fire.Fire(testit)
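The decoder above expects the 136-bit payload that txt2img.py's put_watermark embeds. A round-trip sketch, assuming the invisible-watermark package and OpenCV are installed (the 17-byte payload 'StableDiffusionV1' is exactly 136 bits; decoding can still drop bits on noisy inputs):

import numpy as np
from imwatermark import WatermarkEncoder, WatermarkDecoder

bgr = np.random.randint(0, 256, (512, 512, 3), dtype=np.uint8)  # stand-in BGR image

encoder = WatermarkEncoder()
encoder.set_watermark('bytes', 'StableDiffusionV1'.encode('utf-8'))  # 17 bytes = 136 bits
marked = encoder.encode(bgr, 'dwtDct')

decoder = WatermarkDecoder('bytes', 136)
print(decoder.decode(marked, 'dwtDct'))  # ideally b'StableDiffusionV1'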
stable-diffusion/scripts/train_searcher.py
ADDED
@@ -0,0 +1,147 @@
import os, sys
import numpy as np
import scann
import argparse
import glob
from multiprocessing import cpu_count
from tqdm import tqdm

from ldm.util import parallel_data_prefetch


def search_bruteforce(searcher):
    return searcher.score_brute_force().build()


def search_partioned_ah(searcher, dims_per_block, aiq_threshold, reorder_k,
                        partioning_trainsize, num_leaves, num_leaves_to_search):
    return searcher.tree(num_leaves=num_leaves,
                         num_leaves_to_search=num_leaves_to_search,
                         training_sample_size=partioning_trainsize). \
        score_ah(dims_per_block, anisotropic_quantization_threshold=aiq_threshold).reorder(reorder_k).build()


def search_ah(searcher, dims_per_block, aiq_threshold, reorder_k):
    return searcher.score_ah(dims_per_block, anisotropic_quantization_threshold=aiq_threshold).reorder(
        reorder_k).build()

def load_datapool(dpath):


    def load_single_file(saved_embeddings):
        compressed = np.load(saved_embeddings)
        database = {key: compressed[key] for key in compressed.files}
        return database

    def load_multi_files(data_archive):
        database = {key: [] for key in data_archive[0].files}
        for d in tqdm(data_archive, desc=f'Loading datapool from {len(data_archive)} individual files.'):
            for key in d.files:
                database[key].append(d[key])

        return database

    print(f'Load saved patch embedding from "{dpath}"')
    file_content = glob.glob(os.path.join(dpath, '*.npz'))

    if len(file_content) == 1:
        data_pool = load_single_file(file_content[0])
    elif len(file_content) > 1:
        data = [np.load(f) for f in file_content]
        prefetched_data = parallel_data_prefetch(load_multi_files, data,
                                                 n_proc=min(len(data), cpu_count()), target_data_type='dict')

        data_pool = {key: np.concatenate([od[key] for od in prefetched_data], axis=1)[0] for key in prefetched_data[0].keys()}
    else:
        raise ValueError(f'No npz-files in specified path "{dpath}"; does this directory exist?')

    print(f'Finished loading of retrieval database of length {data_pool["embedding"].shape[0]}.')
    return data_pool


def train_searcher(opt,
                   metric='dot_product',
                   partioning_trainsize=None,
                   reorder_k=None,
                   # todo tune
                   aiq_thld=0.2,
                   dims_per_block=2,
                   num_leaves=None,
                   num_leaves_to_search=None,):

    data_pool = load_datapool(opt.database)
    k = opt.knn

    if not reorder_k:
        reorder_k = 2 * k

    # normalize
    # embeddings =
    searcher = scann.scann_ops_pybind.builder(data_pool['embedding'] / np.linalg.norm(data_pool['embedding'], axis=1)[:, np.newaxis], k, metric)
    pool_size = data_pool['embedding'].shape[0]

    print(*(['#'] * 100))
    print('Initializing scaNN searcher with the following values:')
    print(f'k: {k}')
    print(f'metric: {metric}')
    print(f'reorder_k: {reorder_k}')
    print(f'anisotropic_quantization_threshold: {aiq_thld}')
    print(f'dims_per_block: {dims_per_block}')
    print(*(['#'] * 100))
    print('Start training searcher....')
    print(f'N samples in pool is {pool_size}')

    # this reflects the recommended design choices proposed at
    # https://github.com/google-research/google-research/blob/aca5f2e44e301af172590bb8e65711f0c9ee0cfd/scann/docs/algorithms.md
    if pool_size < 2e4:
        print('Using brute force search.')
        searcher = search_bruteforce(searcher)
    elif 2e4 <= pool_size and pool_size < 1e5:
        print('Using asymmetric hashing search and reordering.')
        searcher = search_ah(searcher, dims_per_block, aiq_thld, reorder_k)
    else:
        print('Using partitioning, asymmetric hashing search and reordering.')

        if not partioning_trainsize:
            partioning_trainsize = data_pool['embedding'].shape[0] // 10
        if not num_leaves:
            num_leaves = int(np.sqrt(pool_size))

        if not num_leaves_to_search:
            num_leaves_to_search = max(num_leaves // 20, 1)

        print('Partitioning params:')
        print(f'num_leaves: {num_leaves}')
        print(f'num_leaves_to_search: {num_leaves_to_search}')
        # self.searcher = self.search_ah(searcher, dims_per_block, aiq_thld, reorder_k)
        searcher = search_partioned_ah(searcher, dims_per_block, aiq_thld, reorder_k,
                                       partioning_trainsize, num_leaves, num_leaves_to_search)

    print('Finish training searcher')
    searcher_savedir = opt.target_path
    os.makedirs(searcher_savedir, exist_ok=True)
    searcher.serialize(searcher_savedir)
    print(f'Saved trained searcher under "{searcher_savedir}"')

if __name__ == '__main__':
    sys.path.append(os.getcwd())
    parser = argparse.ArgumentParser()
    parser.add_argument('--database',
                        '-d',
                        default='data/rdm/retrieval_databases/openimages',
                        type=str,
                        help='path to folder containing the clip feature of the database')
    parser.add_argument('--target_path',
                        '-t',
                        default='data/rdm/searchers/openimages',
                        type=str,
                        help='path to the target folder where the searcher shall be stored.')
    parser.add_argument('--knn',
                        '-k',
                        default=20,
                        type=int,
                        help='number of nearest neighbors, for which the searcher shall be optimized')

    opt, _ = parser.parse_known_args()

    train_searcher(opt,)
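For pools above 1e5 embeddings, the partitioning defaults follow the ScaNN guidance linked in the comment: roughly sqrt(pool_size) leaves, a twentieth of them searched, and a tenth of the pool used to train the partitioning. A sketch of just that default computation for a hypothetical pool (names kept as in the script):

import numpy as np

pool_size = 250_000  # hypothetical embedding count, large enough to trigger partitioning

partioning_trainsize = pool_size // 10
num_leaves = int(np.sqrt(pool_size))
num_leaves_to_search = max(num_leaves // 20, 1)

print(partioning_trainsize, num_leaves, num_leaves_to_search)  # 25000 500 25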
stable-diffusion/scripts/txt2img.py
ADDED
@@ -0,0 +1,352 @@
import argparse, os, sys, glob
import cv2
import torch
import numpy as np
from omegaconf import OmegaConf
from PIL import Image
from tqdm import tqdm, trange
from imwatermark import WatermarkEncoder
from itertools import islice
from einops import rearrange
from torchvision.utils import make_grid
import time
from pytorch_lightning import seed_everything
from torch import autocast
from contextlib import contextmanager, nullcontext

from ldm.util import instantiate_from_config
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.models.diffusion.plms import PLMSSampler
from ldm.models.diffusion.dpm_solver import DPMSolverSampler

from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from transformers import AutoFeatureExtractor


# load safety model
safety_model_id = "CompVis/stable-diffusion-safety-checker"
safety_feature_extractor = AutoFeatureExtractor.from_pretrained(safety_model_id)
safety_checker = StableDiffusionSafetyChecker.from_pretrained(safety_model_id)


def chunk(it, size):
    it = iter(it)
    return iter(lambda: tuple(islice(it, size)), ())
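# Illustrative example (not part of the original script): chunk yields fixed-size
# tuples plus a shorter final remainder, e.g.
#   list(chunk(range(5), 2)) == [(0, 1), (2, 3), (4,)]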


def numpy_to_pil(images):
    """
    Convert a numpy image or a batch of images to a PIL image.
    """
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    pil_images = [Image.fromarray(image) for image in images]

    return pil_images


def load_model_from_config(config, ckpt, verbose=False):
    print(f"Loading model from {ckpt}")
    pl_sd = torch.load(ckpt, map_location="cpu")
    if "global_step" in pl_sd:
        print(f"Global Step: {pl_sd['global_step']}")
    sd = pl_sd["state_dict"]
    model = instantiate_from_config(config.model)
    m, u = model.load_state_dict(sd, strict=False)
    if len(m) > 0 and verbose:
        print("missing keys:")
        print(m)
    if len(u) > 0 and verbose:
        print("unexpected keys:")
        print(u)

    model.cuda()
    model.eval()
    return model
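# Note: strict=False above tolerates missing/unexpected state-dict keys;
# pass verbose=True to print them when debugging checkpoint mismatches.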


def put_watermark(img, wm_encoder=None):
    if wm_encoder is not None:
        img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
        img = wm_encoder.encode(img, 'dwtDct')
        img = Image.fromarray(img[:, :, ::-1])
    return img
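# Illustrative sketch (not part of the original script): the mark can be read
# back with imwatermark's WatermarkDecoder, cf. scripts/tests/test_watermark.py:
#   decoder = WatermarkDecoder('bytes', 136)  # 136 bits = len('StableDiffusionV1') * 8
#   recovered = decoder.decode(cv2.imread(path), 'dwtDct').decode('utf-8')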


def load_replacement(x):
    try:
        hwc = x.shape
        y = Image.open("assets/rick.jpeg").convert("RGB").resize((hwc[1], hwc[0]))
        y = (np.array(y) / 255.0).astype(x.dtype)
        assert y.shape == x.shape
        return y
    except Exception:
        return x


def check_safety(x_image):
    safety_checker_input = safety_feature_extractor(numpy_to_pil(x_image), return_tensors="pt")
    x_checked_image, has_nsfw_concept = safety_checker(images=x_image, clip_input=safety_checker_input.pixel_values)
    assert x_checked_image.shape[0] == len(has_nsfw_concept)
    for i in range(len(has_nsfw_concept)):
        if has_nsfw_concept[i]:
            x_checked_image[i] = load_replacement(x_checked_image[i])
    return x_checked_image, has_nsfw_concept
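# Note: check_safety expects float images in [0, 1] (see the clamp in main below);
# images flagged as NSFW are swapped for the assets/rick.jpeg placeholder.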


def main():
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--prompt",
        type=str,
        nargs="?",
        default="a painting of a virus monster playing guitar",
        help="the prompt to render"
    )
    parser.add_argument(
        "--outdir",
        type=str,
        nargs="?",
        help="dir to write results to",
        default="outputs/txt2img-samples"
    )
    parser.add_argument(
        "--skip_grid",
        action='store_true',
        help="do not save a grid, only individual samples. Helpful when evaluating lots of samples",
    )
    parser.add_argument(
        "--skip_save",
        action='store_true',
        help="do not save individual samples. For speed measurements.",
    )
    parser.add_argument(
        "--ddim_steps",
        type=int,
        default=50,
        help="number of ddim sampling steps",
    )
    parser.add_argument(
        "--plms",
        action='store_true',
        help="use plms sampling",
    )
    parser.add_argument(
        "--dpm_solver",
        action='store_true',
        help="use dpm_solver sampling",
    )
    parser.add_argument(
        "--laion400m",
        action='store_true',
        help="uses the LAION400M model",
    )
    parser.add_argument(
        "--fixed_code",
        action='store_true',
        help="if enabled, uses the same starting code across all samples",
    )
    parser.add_argument(
        "--ddim_eta",
        type=float,
        default=0.0,
        help="ddim eta (eta=0.0 corresponds to deterministic sampling)",
    )
    parser.add_argument(
        "--n_iter",
        type=int,
        default=2,
        help="number of sampling rounds to run per prompt",
    )
    parser.add_argument(
        "--H",
        type=int,
        default=512,
        help="image height, in pixel space",
    )
    parser.add_argument(
        "--W",
        type=int,
        default=512,
        help="image width, in pixel space",
    )
    parser.add_argument(
        "--C",
        type=int,
        default=4,
        help="latent channels",
    )
    parser.add_argument(
        "--f",
        type=int,
        default=8,
        help="downsampling factor",
    )
    parser.add_argument(
        "--n_samples",
        type=int,
        default=3,
        help="how many samples to produce for each given prompt. A.k.a. batch size",
    )
    parser.add_argument(
        "--n_rows",
        type=int,
        default=0,
        help="rows in the grid (default: n_samples)",
    )
    parser.add_argument(
        "--scale",
        type=float,
        default=7.5,
        help="unconditional guidance scale: eps = eps(x, empty) + scale * (eps(x, cond) - eps(x, empty))",
    )
    parser.add_argument(
        "--from-file",
        type=str,
        help="if specified, load prompts from this file",
    )
    parser.add_argument(
        "--config",
        type=str,
        default="configs/stable-diffusion/v1-inference.yaml",
        help="path to config which constructs model",
    )
    parser.add_argument(
        "--ckpt",
        type=str,
        default="models/ldm/stable-diffusion-v1/model.ckpt",
        help="path to checkpoint of model",
    )
    parser.add_argument(
        "--seed",
        type=int,
        default=42,
        help="the seed (for reproducible sampling)",
    )
    parser.add_argument(
        "--precision",
        type=str,
        help="evaluate at this precision",
        choices=["full", "autocast"],
        default="autocast"
    )
    opt = parser.parse_args()
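    # Usage sketch (hypothetical invocation; any flag above can be overridden):
    #   python scripts/txt2img.py --prompt "a photograph of an astronaut riding a horse" --plms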

    if opt.laion400m:
        print("Falling back to LAION 400M model...")
        opt.config = "configs/latent-diffusion/txt2img-1p4B-eval.yaml"
        opt.ckpt = "models/ldm/text2img-large/model.ckpt"
        opt.outdir = "outputs/txt2img-samples-laion400m"

    seed_everything(opt.seed)

    config = OmegaConf.load(f"{opt.config}")
    model = load_model_from_config(config, f"{opt.ckpt}")

    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    model = model.to(device)

    if opt.dpm_solver:
        sampler = DPMSolverSampler(model)
    elif opt.plms:
        sampler = PLMSSampler(model)
    else:
        sampler = DDIMSampler(model)

    os.makedirs(opt.outdir, exist_ok=True)
    outpath = opt.outdir

    print("Creating invisible watermark encoder (see https://github.com/ShieldMnt/invisible-watermark)...")
    wm = "StableDiffusionV1"
    wm_encoder = WatermarkEncoder()
    wm_encoder.set_watermark('bytes', wm.encode('utf-8'))

    batch_size = opt.n_samples
    n_rows = opt.n_rows if opt.n_rows > 0 else batch_size
    if not opt.from_file:
        prompt = opt.prompt
        assert prompt is not None
        data = [batch_size * [prompt]]

    else:
        print(f"reading prompts from {opt.from_file}")
        with open(opt.from_file, "r") as f:
            data = f.read().splitlines()
            data = list(chunk(data, batch_size))

    sample_path = os.path.join(outpath, "samples")
    os.makedirs(sample_path, exist_ok=True)
    base_count = len(os.listdir(sample_path))
    grid_count = len(os.listdir(outpath)) - 1

    start_code = None
    if opt.fixed_code:
        start_code = torch.randn([opt.n_samples, opt.C, opt.H // opt.f, opt.W // opt.f], device=device)

    precision_scope = autocast if opt.precision == "autocast" else nullcontext
    with torch.no_grad():
        with precision_scope("cuda"):
            with model.ema_scope():
                tic = time.time()
                all_samples = list()
                for n in trange(opt.n_iter, desc="Sampling"):
                    for prompts in tqdm(data, desc="data"):
                        uc = None
                        if opt.scale != 1.0:
                            uc = model.get_learned_conditioning(batch_size * [""])
                        if isinstance(prompts, tuple):
                            prompts = list(prompts)
                        c = model.get_learned_conditioning(prompts)
                        shape = [opt.C, opt.H // opt.f, opt.W // opt.f]
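                        # Note: with the defaults (C=4, H=W=512, f=8) this is the
                        # latent shape [4, 64, 64] in which the sampler operates.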
                        samples_ddim, _ = sampler.sample(S=opt.ddim_steps,
                                                         conditioning=c,
                                                         batch_size=opt.n_samples,
                                                         shape=shape,
                                                         verbose=False,
                                                         unconditional_guidance_scale=opt.scale,
                                                         unconditional_conditioning=uc,
                                                         eta=opt.ddim_eta,
                                                         x_T=start_code)

                        x_samples_ddim = model.decode_first_stage(samples_ddim)
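                        # Note: first-stage decoding returns images in [-1, 1];
                        # the rescale and clamp below map pixels into [0, 1].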
                        x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
                        x_samples_ddim = x_samples_ddim.cpu().permute(0, 2, 3, 1).numpy()

                        x_checked_image, has_nsfw_concept = check_safety(x_samples_ddim)

                        x_checked_image_torch = torch.from_numpy(x_checked_image).permute(0, 3, 1, 2)

                        if not opt.skip_save:
                            for x_sample in x_checked_image_torch:
                                x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c')
                                img = Image.fromarray(x_sample.astype(np.uint8))
                                img = put_watermark(img, wm_encoder)
                                img.save(os.path.join(sample_path, f"{base_count:05}.png"))
                                base_count += 1

                        if not opt.skip_grid:
                            all_samples.append(x_checked_image_torch)

                if not opt.skip_grid:
                    # additionally, save as grid
                    grid = torch.stack(all_samples, 0)
                    grid = rearrange(grid, 'n b c h w -> (n b) c h w')
                    grid = make_grid(grid, nrow=n_rows)

                    # to image
                    grid = 255. * rearrange(grid, 'c h w -> h w c').cpu().numpy()
                    img = Image.fromarray(grid.astype(np.uint8))
                    img = put_watermark(img, wm_encoder)
                    img.save(os.path.join(outpath, f'grid-{grid_count:04}.png'))
                    grid_count += 1

                toc = time.time()

    print(f"Your samples are ready and waiting for you here: \n{outpath} \n"
          f" \nEnjoy.")


if __name__ == "__main__":
    main()
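For completeness, a minimal sketch of driving txt2img.py programmatically rather than from the shell (hypothetical setup; assumes scripts/ is on sys.path, the checkpoint sits at the default models/ldm/stable-diffusion-v1/model.ckpt, and a CUDA device is available):

    import sys
    import txt2img  # note: importing loads the safety checker at module level

    sys.argv = ['txt2img.py',
                '--prompt', 'a photograph of an astronaut riding a horse',
                '--plms', '--n_samples', '1', '--n_iter', '1']
    txt2img.main()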