import torch
import cv2
import numpy as np
from torchvision.transforms.functional import to_tensor, center_crop, resize
from PIL import Image
from EngageEngine.pipeline import EngagePipeline
from diffusers import (
    EulerAncestralDiscreteScheduler,
    AutoencoderKL,
    ControlNetModel,
)
def process_sketch(x, im_size=(1024, 1024), sketch_detail=0.5, sketch_softness=0.5):
    # Flatten the RGBA sketch onto a white background, using its own alpha channel as the paste mask.
    x_b = Image.new("RGBA", x.size, "WHITE")
    x_b.paste(x, mask=x)
    # Convert to a (1, 3, H, W) float tensor, square-crop to the image width, then resize.
    x = to_tensor(x_b.convert('RGB')).unsqueeze(0)
    x = center_crop(x, x.shape[-1])
    x = resize(x, im_size)
    # Map the detail/softness sliders to Canny thresholds: higher detail lowers both thresholds.
    u_th = (1 - sketch_detail) * 190 + 10
    l_th = (1 - sketch_detail) ** (sketch_softness * 8 + 1) * 185 + 5
    # Run Canny edge detection per batch item on the 8-bit HWC image.
    edges = [cv2.Canny(x[i].mul(255).permute(1, 2, 0).numpy().astype(np.uint8),
                       u_th, l_th, L2gradient=True) for i in range(len(x))]
    # Re-stack to (N, 1, H, W) in [0, 1] and replicate to 3 channels for the ControlNet input.
    edges = torch.stack([torch.tensor(e).div(255).unsqueeze(0) for e in edges], dim=0)
    edges = torch.concatenate([edges, edges, edges], dim=1)
    return edges
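
# Hedged usage sketch (not part of the original Space code): shows how process_sketch
# might be called to turn a transparent-background drawing into a 3-channel Canny edge
# map for ControlNet conditioning. The file name is hypothetical.
def _demo_process_sketch():
    sketch = Image.open("example_sketch.png").convert("RGBA")  # hypothetical input file
    cond = process_sketch(sketch, sketch_detail=0.7, sketch_softness=0.3)
    # cond has shape (1, 3, 1024, 1024) with edge values in [0, 1]
    return cond
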
def process_mask(x, mask, im_size=(1024, 1024)):
    # Base image: (1, 3, H, W) float tensor, square-cropped to its width and resized.
    x = to_tensor(x.convert('RGB')).unsqueeze(0)
    x = center_crop(x, x.shape[-1])
    x = resize(x, im_size)
    # Mask: single-channel (1, 1, H, W) tensor, processed the same way so it stays aligned with the image.
    mask = to_tensor(mask.convert('L')).unsqueeze(0)
    mask = center_crop(mask, mask.shape[-1])
    mask = resize(mask, im_size)
    return x, mask
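
# Hedged usage sketch (not part of the original Space code): process_mask pairs a base
# image with its editing mask; both file names below are hypothetical.
def _demo_process_mask():
    photo = Image.open("example_photo.png")   # hypothetical input file
    mask = Image.open("example_mask.png")     # hypothetical input file
    image_t, mask_t = process_mask(photo, mask, im_size=(1024, 1024))
    # image_t: (1, 3, 1024, 1024) in [0, 1]; mask_t: (1, 1, 1024, 1024) in [0, 1]
    return image_t, mask_t
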
def fetch_model():
    # Load the VAE component (fp16-safe SDXL VAE)
    vae = AutoencoderKL.from_pretrained(
        "madebyollin/sdxl-vae-fp16-fix",
        torch_dtype=torch.float16
    )
    # Load the Canny ControlNet for SDXL
    controlnet = ControlNetModel.from_pretrained(
        "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16
    )
    # Configure the pipeline around the ProteusV0.4-Lightning SDXL checkpoint
    pipe = EngagePipeline.from_pretrained(
        "dataautogpt3/ProteusV0.4-Lightning",
        vae=vae,
        controlnet=controlnet,
        torch_dtype=torch.float16
    )
    pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
    # Register the style LoRAs; all adapters start disabled with weight 0.0.
    pipe.load_lora_weights("EngageEngine/ENGAGE_LORA.safetensors", adapter_name="ENGAGE_LORA")
    pipe.load_lora_weights("EngageEngine/FILM_LORA.safetensors", adapter_name="FILM_LORA")
    pipe.load_lora_weights("EngageEngine/MJ_LORA.safetensors", adapter_name="MJ_LORA")
    pipe.load_lora_weights("EngageEngine/MORE_ART_LORA.safetensors", adapter_name="MORE_ART_LORA")
    pipe.set_adapters(["ENGAGE_LORA", "FILM_LORA", "MJ_LORA", "MORE_ART_LORA"], adapter_weights=[0.0, 0.0, 0.0, 0.0])
    pipe.to('cuda')
    return pipe
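
# Hedged usage sketch (not part of the original Space code): shows how the pipeline
# might be driven once fetch_model() has run. The prompt, the adapter weights, and the
# assumption that EngagePipeline accepts diffusers-style ControlNet/inpaint arguments
# (prompt, image, mask_image, control_image, num_inference_steps) and returns the
# standard diffusers output object are illustrative only; the real __call__ signature
# lives in EngageEngine/pipeline.py.
def _demo_generate(pipe, sketch, photo, mask):
    control = process_sketch(sketch, sketch_detail=0.6, sketch_softness=0.4)
    image, mask_t = process_mask(photo, mask)
    # Re-balance the four LoRA adapters loaded in fetch_model(); weights are illustrative.
    pipe.set_adapters(
        ["ENGAGE_LORA", "FILM_LORA", "MJ_LORA", "MORE_ART_LORA"],
        adapter_weights=[0.8, 0.2, 0.2, 0.2],
    )
    result = pipe(
        prompt="a product render, studio lighting",  # illustrative prompt
        image=image,
        mask_image=mask_t,
        control_image=control,
        num_inference_steps=8,
    ).images[0]
    return result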