import cv2
import numpy as np
import torch
from PIL import Image
from diffusers import StableDiffusionInstructPix2PixPipeline, EulerAncestralDiscreteScheduler


class Light_Shadow_Remover():
    def __init__(self, config):
        self.device = config.device
        # InstructPix2Pix guidance scales: how strongly to follow the input
        # image (cfg_image) versus the text prompt (cfg_text).
        self.cfg_image = 1.5
        self.cfg_text = 1.0

        pipeline = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            config.light_remover_ckpt_path,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipeline.scheduler = EulerAncestralDiscreteScheduler.from_config(pipeline.scheduler.config)
        pipeline.set_progress_bar_config(disable=True)

        self.pipeline = pipeline.to(self.device, torch.float16)

    @torch.no_grad()
    def __call__(self, image):
        image = image.resize((512, 512))

        if image.mode == 'RGBA':
            # Erode the alpha channel slightly to drop fringe pixels along the
            # silhouette, then flatten fully transparent regions to white
            # before running the delighting model.
            image_array = np.array(image)
            alpha_channel = image_array[:, :, 3]
            erosion_size = 3
            kernel = np.ones((erosion_size, erosion_size), np.uint8)
            alpha_channel = cv2.erode(alpha_channel, kernel, iterations=1)
            image_array[alpha_channel == 0, :3] = 255
            image_array[:, :, 3] = alpha_channel
            image = Image.fromarray(image_array)

            image_tensor = torch.tensor(np.array(image) / 255.0).to(self.device)
            alpha = image_tensor[:, :, 3:]
            rgb_target = image_tensor[:, :, :3]
        else:
            # No alpha channel: treat the whole image as opaque.
            image_tensor = torch.tensor(np.array(image) / 255.0).to(self.device)
            alpha = torch.ones_like(image_tensor)[:, :, :1]
            rgb_target = image_tensor[:, :, :3]

        image = image.convert('RGB')

        # Run InstructPix2Pix with an empty prompt and a fixed seed so the
        # light/shadow removal is deterministic for a given input image.
        image = self.pipeline(
            prompt="",
            image=image,
            generator=torch.manual_seed(42),
            height=512,
            width=512,
            num_inference_steps=50,
            image_guidance_scale=self.cfg_image,
            guidance_scale=self.cfg_text,
        ).images[0]

        return image
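

# Minimal usage sketch (not part of the original module). It assumes only that
# the config object exposes `device` and `light_remover_ckpt_path`, as read in
# __init__ above; the checkpoint path and image filenames below are
# placeholders, not confirmed repository paths.
if __name__ == "__main__":
    from types import SimpleNamespace

    config = SimpleNamespace(
        device="cuda",
        light_remover_ckpt_path="path/to/light_remover_checkpoint",  # placeholder path
    )
    remover = Light_Shadow_Remover(config)

    # Hypothetical input: a rendered or captured view with baked-in lighting.
    source = Image.open("input_view.png")
    delighted = remover(source)
    delighted.save("delighted_view.png")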