schirrmacher committed
Commit 4810ae5
1 Parent(s): b2a4f53

Upload folder using huggingface_hub

Files changed (3)
  1. util/ic-light.py +443 -0
  2. util/merge_images.py +34 -11
  3. util/to_ground_truth.py +51 -0
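
The commit message above ("Upload folder using huggingface_hub") refers to the huggingface_hub upload API. A minimal sketch of what such an upload typically looks like is shown below; the repo id and local paths are placeholders, not values taken from this commit:

    # Hypothetical sketch; repo_id and folder_path are placeholders, only the API call itself is real.
    from huggingface_hub import HfApi

    api = HfApi()  # picks up the token stored by `huggingface-cli login`
    api.upload_folder(
        folder_path="util",                  # local folder to push
        path_in_repo="util",                 # destination path inside the repo
        repo_id="<user>/<dataset-name>",     # placeholder dataset repo id
        repo_type="dataset",
        commit_message="Upload folder using huggingface_hub",
    )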
util/ic-light.py ADDED
@@ -0,0 +1,443 @@
+ import os
+ import math
+ import random
+ import string
+ import numpy as np
+ import torch
+ import safetensors.torch as sf
+ import albumentations as A
+ import cv2
+ from diffusers.utils import load_image
+
+ from PIL import Image, ImageFilter, ImageOps
+ from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, StableDiffusionLatentUpscalePipeline
+ from diffusers import AutoencoderKL, UNet2DConditionModel, DDIMScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler
+ from diffusers.models.attention_processor import AttnProcessor2_0
+ from transformers import CLIPTextModel, CLIPTokenizer
+ from enum import Enum
+ # from torch.hub import download_url_to_file
+
+
+ # 'stablediffusionapi/realistic-vision-v51'
+ # 'runwayml/stable-diffusion-v1-5'
+ sd15_name = 'stablediffusionapi/realistic-vision-v51'
+ tokenizer = CLIPTokenizer.from_pretrained(sd15_name, subfolder="tokenizer")
+ text_encoder = CLIPTextModel.from_pretrained(sd15_name, subfolder="text_encoder")
+ vae = AutoencoderKL.from_pretrained(sd15_name, subfolder="vae")
+ unet = UNet2DConditionModel.from_pretrained(sd15_name, subfolder="unet")
+ upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16)
+
+ # Change UNet: expand conv_in from 4 to 8 channels so the foreground latent can be concatenated as conditioning
+
+ with torch.no_grad():
+     new_conv_in = torch.nn.Conv2d(8, unet.conv_in.out_channels, unet.conv_in.kernel_size, unet.conv_in.stride, unet.conv_in.padding)
+     new_conv_in.weight.zero_()
+     new_conv_in.weight[:, :4, :, :].copy_(unet.conv_in.weight)
+     new_conv_in.bias = unet.conv_in.bias
+     unet.conv_in = new_conv_in
+
+ unet_original_forward = unet.forward
+
+
+ def hooked_unet_forward(sample, timestep, encoder_hidden_states, **kwargs):
+     c_concat = kwargs['cross_attention_kwargs']['concat_conds'].to(sample)
+     c_concat = torch.cat([c_concat] * (sample.shape[0] // c_concat.shape[0]), dim=0)
+     new_sample = torch.cat([sample, c_concat], dim=1)
+     kwargs['cross_attention_kwargs'] = {}
+     return unet_original_forward(new_sample, timestep, encoder_hidden_states, **kwargs)
+
+
+ unet.forward = hooked_unet_forward
+
+ # Load the IC-Light offset weights and merge them into the UNet
+
+ model_path = './models/iclight_sd15_fc.safetensors'
+ # download_url_to_file(url='https://huggingface.co/lllyasviel/ic-light/resolve/main/iclight_sd15_fc.safetensors', dst=model_path)
+ sd_offset = sf.load_file(model_path)
+ sd_origin = unet.state_dict()
+ keys = sd_origin.keys()
+ sd_merged = {k: sd_origin[k] + sd_offset[k] for k in sd_origin.keys()}
+ unet.load_state_dict(sd_merged, strict=True)
+ del sd_offset, sd_origin, sd_merged, keys
+
+ # Device
+
+ device = torch.device('cuda')
+ text_encoder = text_encoder.to(device=device, dtype=torch.float16)
+ vae = vae.to(device=device, dtype=torch.bfloat16)
+ unet = unet.to(device=device, dtype=torch.float16)
+
+ # SDP
+
+ unet.set_attn_processor(AttnProcessor2_0())
+ vae.set_attn_processor(AttnProcessor2_0())
+
+ # Samplers
+
+ ddim_scheduler = DDIMScheduler(
+     num_train_timesteps=1000,
+     beta_start=0.00085,
+     beta_end=0.012,
+     beta_schedule="scaled_linear",
+     clip_sample=False,
+     set_alpha_to_one=False,
+     steps_offset=1,
+ )
+
+ euler_a_scheduler = EulerAncestralDiscreteScheduler(
+     num_train_timesteps=1000,
+     beta_start=0.00085,
+     beta_end=0.012,
+     steps_offset=1
+ )
+
+ dpmpp_2m_sde_karras_scheduler = DPMSolverMultistepScheduler(
+     num_train_timesteps=1000,
+     beta_start=0.00085,
+     beta_end=0.012,
+     algorithm_type="sde-dpmsolver++",
+     use_karras_sigmas=True,
+     steps_offset=1
+ )
+
+ # Pipelines
+
+ t2i_pipe = StableDiffusionPipeline(
+     vae=vae,
+     text_encoder=text_encoder,
+     tokenizer=tokenizer,
+     unet=unet,
+     scheduler=dpmpp_2m_sde_karras_scheduler,
+     safety_checker=None,
+     requires_safety_checker=False,
+     feature_extractor=None,
+     image_encoder=None
+ )
+
+ i2i_pipe = StableDiffusionImg2ImgPipeline(
+     vae=vae,
+     text_encoder=text_encoder,
+     tokenizer=tokenizer,
+     unet=unet,
+     scheduler=dpmpp_2m_sde_karras_scheduler,
+     safety_checker=None,
+     requires_safety_checker=False,
+     feature_extractor=None,
+     image_encoder=None
+ )
+
+
+ @torch.inference_mode()
+ def encode_prompt_inner(txt: str):
+     max_length = tokenizer.model_max_length
+     chunk_length = tokenizer.model_max_length - 2
+     id_start = tokenizer.bos_token_id
+     id_end = tokenizer.eos_token_id
+     id_pad = id_end
+
+     def pad(x, p, i):
+         return x[:i] if len(x) >= i else x + [p] * (i - len(x))
+
+     tokens = tokenizer(txt, truncation=False, add_special_tokens=False)["input_ids"]
+     chunks = [[id_start] + tokens[i: i + chunk_length] + [id_end] for i in range(0, len(tokens), chunk_length)]
+     chunks = [pad(ck, id_pad, max_length) for ck in chunks]
+
+     token_ids = torch.tensor(chunks).to(device=device, dtype=torch.int64)
+     conds = text_encoder(token_ids).last_hidden_state
+
+     return conds
+
+
+ @torch.inference_mode()
+ def encode_prompt_pair(positive_prompt, negative_prompt):
+     c = encode_prompt_inner(positive_prompt)
+     uc = encode_prompt_inner(negative_prompt)
+
+     c_len = float(len(c))
+     uc_len = float(len(uc))
+     max_count = max(c_len, uc_len)
+     c_repeat = int(math.ceil(max_count / c_len))
+     uc_repeat = int(math.ceil(max_count / uc_len))
+     max_chunk = max(len(c), len(uc))
+
+     c = torch.cat([c] * c_repeat, dim=0)[:max_chunk]
+     uc = torch.cat([uc] * uc_repeat, dim=0)[:max_chunk]
+
+     c = torch.cat([p[None, ...] for p in c], dim=1)
+     uc = torch.cat([p[None, ...] for p in uc], dim=1)
+
+     return c, uc
+
+
+ @torch.inference_mode()
+ def pytorch2numpy(imgs, quant=True):
+     results = []
+     for x in imgs:
+         y = x.movedim(0, -1)
+
+         if quant:
+             y = y * 127.5 + 127.5
+             y = y.detach().float().cpu().numpy().clip(0, 255).astype(np.uint8)
+         else:
+             y = y * 0.5 + 0.5
+             y = y.detach().float().cpu().numpy().clip(0, 1).astype(np.float32)
+
+         results.append(y)
+     return results
+
+
+ @torch.inference_mode()
+ def numpy2pytorch(imgs):
+     h = torch.from_numpy(np.stack(imgs, axis=0)).float() / 127.0 - 1.0  # so that 127 must be strictly 0.0
+     h = h.movedim(-1, 1)
+     return h
+
+
+ def resize_and_center_crop(image, target_width, target_height):
+     pil_image = Image.fromarray(image)
+     original_width, original_height = pil_image.size
+     scale_factor = max(target_width / original_width, target_height / original_height)
+     resized_width = int(round(original_width * scale_factor))
+     resized_height = int(round(original_height * scale_factor))
+     resized_image = pil_image.resize((resized_width, resized_height), Image.LANCZOS)
+     left = (resized_width - target_width) / 2
+     top = (resized_height - target_height) / 2
+     right = (resized_width + target_width) / 2
+     bottom = (resized_height + target_height) / 2
+     cropped_image = resized_image.crop((left, top, right, bottom))
+     return np.array(cropped_image)
+
+
+ def resize_without_crop(image, target_width, target_height):
+     pil_image = Image.fromarray(image)
+     resized_image = pil_image.resize((target_width, target_height), Image.LANCZOS)
+     return np.array(resized_image)
+
+ def remove_alpha_threshold(image, alpha_threshold=160):
+     # This function removes artifacts created by LayerDiffusion
+     mask = image[:, :, 3] < alpha_threshold
+     image[mask] = [0, 0, 0, 0]
+     return image
+
+ @torch.inference_mode()
+ def process(input_fg, prompt, image_width, image_height, num_samples, seed, steps, a_prompt, n_prompt, cfg, highres_scale, highres_denoise, lowres_denoise, bg_source):
+     bg_source = BGSource(bg_source)
+     input_bg = None
+
+     if bg_source == BGSource.NONE:
+         pass
+     elif bg_source == BGSource.LEFT:
+         gradient = np.linspace(255, 0, image_width)
+         image = np.tile(gradient, (image_height, 1))
+         input_bg = np.stack((image,) * 3, axis=-1).astype(np.uint8)
+     elif bg_source == BGSource.RIGHT:
+         gradient = np.linspace(0, 255, image_width)
+         image = np.tile(gradient, (image_height, 1))
+         input_bg = np.stack((image,) * 3, axis=-1).astype(np.uint8)
+     elif bg_source == BGSource.TOP:
+         gradient = np.linspace(255, 0, image_height)[:, None]
+         image = np.tile(gradient, (1, image_width))
+         input_bg = np.stack((image,) * 3, axis=-1).astype(np.uint8)
+     elif bg_source == BGSource.BOTTOM:
+         gradient = np.linspace(0, 255, image_height)[:, None]
+         image = np.tile(gradient, (1, image_width))
+         input_bg = np.stack((image,) * 3, axis=-1).astype(np.uint8)
+     else:
+         raise ValueError('Wrong initial latent!')
+
+     rng = torch.Generator(device=device).manual_seed(int(seed))
+
+     fg = resize_and_center_crop(input_fg, image_width, image_height)
+
+     concat_conds = numpy2pytorch([fg]).to(device=vae.device, dtype=vae.dtype)
+     concat_conds = vae.encode(concat_conds).latent_dist.mode() * vae.config.scaling_factor
+
+     conds, unconds = encode_prompt_pair(positive_prompt=prompt + ', ' + a_prompt, negative_prompt=n_prompt)
+
+     if input_bg is None:
+         latents = t2i_pipe(
+             prompt_embeds=conds,
+             negative_prompt_embeds=unconds,
+             width=image_width,
+             height=image_height,
+             num_inference_steps=steps,
+             num_images_per_prompt=num_samples,
+             generator=rng,
+             output_type='latent',
+             guidance_scale=cfg,
+             cross_attention_kwargs={'concat_conds': concat_conds},
+         ).images.to(vae.dtype) / vae.config.scaling_factor
+     else:
+         bg = resize_and_center_crop(input_bg, image_width, image_height)
+         bg_latent = numpy2pytorch([bg]).to(device=vae.device, dtype=vae.dtype)
+         bg_latent = vae.encode(bg_latent).latent_dist.mode() * vae.config.scaling_factor
+         latents = i2i_pipe(
+             image=bg_latent,
+             strength=lowres_denoise,
+             prompt_embeds=conds,
+             negative_prompt_embeds=unconds,
+             width=image_width,
+             height=image_height,
+             num_inference_steps=int(round(steps / lowres_denoise)),
+             num_images_per_prompt=num_samples,
+             generator=rng,
+             output_type='latent',
+             guidance_scale=cfg,
+             cross_attention_kwargs={'concat_conds': concat_conds},
+         ).images.to(vae.dtype) / vae.config.scaling_factor
+
+     pixels = vae.decode(latents).sample
+     pixels = pytorch2numpy(pixels)
+     pixels = [resize_without_crop(
+         image=p,
+         target_width=int(round(image_width * highres_scale / 64.0) * 64),
+         target_height=int(round(image_height * highres_scale / 64.0) * 64))
+         for p in pixels]
+
+     pixels = numpy2pytorch(pixels).to(device=vae.device, dtype=vae.dtype)
+     latents = vae.encode(pixels).latent_dist.mode() * vae.config.scaling_factor
+     latents = latents.to(device=unet.device, dtype=unet.dtype)
+
+     image_height, image_width = latents.shape[2] * 8, latents.shape[3] * 8
+
+     fg = resize_and_center_crop(input_fg, image_width, image_height)
+     concat_conds = numpy2pytorch([fg]).to(device=vae.device, dtype=vae.dtype)
+     concat_conds = vae.encode(concat_conds).latent_dist.mode() * vae.config.scaling_factor
+
+     latents = i2i_pipe(
+         image=latents,
+         strength=highres_denoise,
+         prompt_embeds=conds,
+         negative_prompt_embeds=unconds,
+         width=image_width,
+         height=image_height,
+         num_inference_steps=int(round(steps / highres_denoise)),
+         num_images_per_prompt=num_samples,
+         generator=rng,
+         output_type='latent',
+         guidance_scale=cfg,
+         cross_attention_kwargs={'concat_conds': concat_conds},
+     ).images.to(vae.dtype) / vae.config.scaling_factor
+
+     pixels = vae.decode(latents).sample
+
+     return pytorch2numpy(pixels)
+
+
+ def augment(image):
+
+     original = image.copy()
+
+     image_height, image_width, _ = original.shape
+
+     if random.choice([True, False]):
+         target_height, target_width = 640 * 2, 512 * 2
+     else:
+         target_height, target_width = 512 * 2, 640 * 2
+
+     left_right_padding = (max(target_width, image_width) - min(target_width, image_width)) // 2
+
+     original = cv2.copyMakeBorder(
+         original,
+         top=max(target_height, image_height) - min(target_height, image_height),
+         bottom=0,
+         left=left_right_padding,
+         right=left_right_padding,
+         borderType=cv2.BORDER_CONSTANT,
+         value=(0, 0, 0)
+     )
+
+     transform = A.Compose(
+         [
+             A.HorizontalFlip(p=0.5),
+             A.ShiftScaleRotate(
+                 shift_limit_x=(-0.2, 0.2),
+                 shift_limit_y=(0.0, 0.2),
+                 scale_limit=(0, 0),
+                 rotate_limit=(-2, 2),
+                 border_mode=cv2.BORDER_CONSTANT,
+                 p=0.5,
+             ),
+         ]
+     )
+
+     return transform(image=original)["image"]
+
+ class BGSource(Enum):
+     NONE = "None"
+     LEFT = "Left Light"
+     RIGHT = "Right Light"
+     TOP = "Top Light"
+     BOTTOM = "Bottom Light"
+
+
+ input_dir = "/mnt/g/My Drive/humans/humans/"
+ output_dir = "dataset"
+ ground_truth_dir = os.path.join(output_dir, "gr")
+ image_dir = os.path.join(output_dir, "im")
+
+ prompts = [
+     "sunshine, cafe, chilled",
+     "exhibition, paintings",
+     "beach",
+     "winter, snow",
+     "forest, cloudy",
+     "party, people",
+     "cozy living room, sofa, shelf",
+     "mountains",
+     "nature, landscape",
+     "city centre, busy",
+     "neighbourhood, street, cars",
+     "bright sun from behind, sunset, dark",
+     "apartment, soft light",
+     "garden",
+     "school",
+     "art exhibition with paintings in background"
+ ]
+
+ os.makedirs(ground_truth_dir, exist_ok=True)
+ os.makedirs(image_dir, exist_ok=True)
+
+ all_images = os.listdir(input_dir)
+ random.shuffle(all_images)
+
+ for filename in all_images:
+     if filename.lower().endswith(('.png', '.jpg', '.jpeg')):  # Check if the file is an image
+
+         letters = string.ascii_lowercase
+         random_string = "".join(random.choice(letters) for i in range(13))
+         random_filename = f"{random_string}_{filename}"
+
+         image_path = os.path.join(input_dir, filename)
+         image = cv2.imread(image_path, cv2.IMREAD_UNCHANGED)
+         image = cv2.cvtColor(image, cv2.COLOR_BGR2RGBA)
+         mask = image[:, :, 3] < 100
+         image[mask] = [0, 0, 0, 0]
+
+         image = cv2.GaussianBlur(image, (5, 5), 0)
+         image = np.array(image)
+
+         image_augmented = augment(image)
+         Image.fromarray(image_augmented).getchannel("A").save(os.path.join(ground_truth_dir, random_filename))
+
+         image_augmented = image_augmented[:, :, :3]
+
+         # We halve the height and width because SD 1.5 produces much better results at that size
+         image_augmented = image_augmented[::2, ::2]
+         image_height, image_width, _ = image_augmented.shape
+
+         num_samples = 1
+         seed = random.randint(1, 2**63 - 1)  # keep the seed within the range accepted by torch's manual_seed
+         steps = 25
+         constant_prompt = "details, high quality"
+         prompt = random.choice(prompts)
+         n_prompt = "bad quality, blurry"
+         cfg = 2.0
+         highres_scale = 2.0
+         highres_denoise = 0.7
+         lowres_denoise = 0.5
+         bg_source = BGSource.NONE
+
+         results = process(image_augmented, constant_prompt, image_width, image_height, num_samples, seed, steps, prompt, n_prompt, cfg, highres_scale, highres_denoise, lowres_denoise, bg_source)
+         result_image = Image.fromarray(results[0])
+         result_image.save(os.path.join(image_dir, random_filename))
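
Note that ic-light.py reads the IC-Light offset weights from ./models/iclight_sd15_fc.safetensors and only ships a commented-out download line. A minimal sketch for fetching that file with huggingface_hub follows; the repo id and filename come from the URL in that comment, and copying into models/ simply mirrors the model_path the script expects:

    # Sketch: fetch the IC-Light FC weights referenced above into ./models/.
    # repo_id and filename are taken from the commented-out download URL in ic-light.py.
    import os
    import shutil
    from huggingface_hub import hf_hub_download

    os.makedirs("models", exist_ok=True)
    cached = hf_hub_download(repo_id="lllyasviel/ic-light", filename="iclight_sd15_fc.safetensors")
    shutil.copy(cached, os.path.join("models", "iclight_sd15_fc.safetensors"))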
util/merge_images.py CHANGED
@@ -9,8 +9,8 @@ import albumentations as A
  def augment_final_image(image):
      transform = A.Compose(
          [
-             A.MotionBlur(blur_limit=(5, 11), p=1.0),
-             A.GaussNoise(var_limit=(10, 150), p=1.0),
+             A.MotionBlur(blur_limit=(3, 11), p=0.05),
+             A.GaussNoise(var_limit=(1, 10), p=0.2),
              A.ColorJitter(
                  brightness=(0.6, 1.0),
                  contrast=(0.6, 1.0),
@@ -23,7 +23,7 @@ def augment_final_image(image):
                  fog_coef_upper=0.2,
                  alpha_coef=0.08,
                  always_apply=False,
-                 p=0.5,
+                 p=0.2,
              ),
              A.RandomShadow(
                  shadow_roi=(0, 0.5, 1, 1),
@@ -32,7 +32,7 @@ def augment_final_image(image):
                  num_shadows_upper=None,
                  shadow_dimension=5,
                  always_apply=False,
-                 p=0.5,
+                 p=0.2,
              ),
              A.RandomToneCurve(scale=0.1, always_apply=False, p=0.5),
          ]
@@ -40,6 +40,24 @@ def augment_final_image(image):
      return transform(image=image)["image"]
  
  
+ def augment_background(image):
+     transform = A.Compose(
+         [
+             A.RandomBrightnessContrast(brightness_limit=(-0.4, 0.0), p=0.2),
+             A.RandomShadow(
+                 shadow_roi=(0, 0.7, 1, 1),
+                 num_shadows_limit=(1, 5),
+                 num_shadows_lower=None,
+                 num_shadows_upper=None,
+                 shadow_dimension=5,
+                 always_apply=False,
+                 p=1.0,
+             ),
+         ]
+     )
+     return transform(image=image)["image"]
+ 
+ 
  def remove_alpha_threshold(image, alpha_threshold=160):
      # This function removes artifacts created by LayerDiffusion
      mask = image[:, :, 3] < alpha_threshold
@@ -48,7 +66,6 @@ def remove_alpha_threshold(image, alpha_threshold=160):
  
  
  def create_ground_truth_mask(image):
-     image = remove_alpha_threshold(image.copy())
      return image[:, :, 3]
  
  
@@ -66,19 +83,23 @@ def scale_image(image, factor=1.5):
  
  def augment_and_match_size(image, target_width, target_height):
  
-     random_scale = random.uniform(1, 1.5)
-     image = scale_image(image, random_scale)
+     color = [0, 0, 0, 0]
+     image = cv2.copyMakeBorder(
+         image, 200, 200, 200, 200, cv2.BORDER_CONSTANT, value=color
+     )
  
      transform = A.Compose(
          [
+             A.LongestMaxSize(max_size=max(target_width, target_height), p=1.0),
+             A.RandomScale(scale_limit=(-0.7, 0.5)),
              A.HorizontalFlip(p=0.5),
              A.ShiftScaleRotate(
                  shift_limit_x=(-0.3, 0.3),
-                 shift_limit_y=(0.0, 0.4),
+                 shift_limit_y=(0.0, 0.5),
                  scale_limit=(0, 0),
-                 border_mode=cv2.BORDER_CONSTANT,
                  rotate_limit=(-5, 5),
-                 p=0.7,
+                 border_mode=cv2.BORDER_CONSTANT,
+                 p=0.5,
              ),
          ]
      )
@@ -102,7 +123,6 @@ def augment_and_match_size(image, target_width, target_height):
      delta_h = max(0, target_height - current_height)
      top, bottom = delta_h // 2, delta_h - (delta_h // 2)
      left, right = delta_w // 2, delta_w - (delta_w // 2)
-     color = [0, 0, 0, 0]
      image = cv2.copyMakeBorder(
          image, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color
      )
@@ -149,6 +169,9 @@ def create_training_data(
      if segmentation.shape[2] < 4:
          raise Exception(f"Image does not have an alpha channel: {segmentation_path}")
  
+     background = augment_background(background)
+     segmentation = remove_alpha_threshold(segmentation)
+ 
      file_name = create_random_filename_from_filepath(segmentation_path)
      image_path = os.path.join(image_path, file_name)
      ground_truth_path = os.path.join(ground_truth_path, file_name)
util/to_ground_truth.py ADDED
@@ -0,0 +1,51 @@
+ # This script takes RGBA masks and exports the alpha channel as ground truth
+ # as another image.
+
+ import os
+ import argparse
+ from PIL import Image
+
+
+ def extract_alpha_channel(input_folder, output_folder):
+     os.makedirs(output_folder, exist_ok=True)
+
+     for filename in os.listdir(input_folder):
+         if filename.endswith(".png"):
+             img_path = os.path.join(input_folder, filename)
+             img = Image.open(img_path)
+
+             if img.mode == "RGBA":
+                 alpha = img.split()[-1]
+
+                 alpha_output_path = os.path.join(output_folder, f"{filename}")
+                 alpha.save(alpha_output_path)
+                 print(f"Saved alpha channel for {filename} to {alpha_output_path}")
+             else:
+                 print(f"Image {filename} does not have an alpha channel.")
+
+
+ def main():
+     parser = argparse.ArgumentParser(
+         description="Extract alpha channels from PNG images."
+     )
+     parser.add_argument(
+         "input_folder", type=str, help="Path to the input folder containing PNG images."
+     )
+     parser.add_argument(
+         "output_folder",
+         type=str,
+         help="Path to the output folder where alpha channels will be saved.",
+     )
+
+     args = parser.parse_args()
+
+     # Ensure the input and output folders are not the same
+     if os.path.abspath(args.input_folder) == os.path.abspath(args.output_folder):
+         print("Error: Input and output folders must be different.")
+         return
+
+     extract_alpha_channel(args.input_folder, args.output_folder)
+
+
+ if __name__ == "__main__":
+     main()
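
As a quick usage sketch, the helper above can also be called directly from Python instead of via the CLI, assuming util/ is on the Python path; the folder names here are placeholders, not paths from this repository:

    # Hypothetical direct call; "masks" and "ground_truth" are placeholder folders.
    from to_ground_truth import extract_alpha_channel

    extract_alpha_channel("masks", "ground_truth")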