import base64
import json
import os
import sys
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor
from io import BytesIO
from pathlib import Path
from pprint import pprint
from typing import Any, Dict, List, Union

import numpy as np
import torch
import torchvision.transforms
from diffusers import (
    DiffusionPipeline,
    DPMSolverMultistepScheduler,
    DPMSolverSinglestepScheduler,
    EulerAncestralDiscreteScheduler,
    utils,
)
from PIL import Image
from safetensors.torch import load_file
from torch import autocast, tensor

# Resolve paths relative to this file so the handler works regardless of the
# current working directory.
REPO_DIR = Path(__file__).resolve().parent

# Log the working directory at startup (useful when debugging deployments).
print(os.getcwd())

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

if device.type != "cuda":
    raise ValueError("This handler needs to run on a CUDA-capable GPU.")

class EndpointHandler:
    """Inference handler for a Stable Diffusion pipeline with LoRA and textual inversion support."""

    # LoRA weight files bundled with the repository, keyed by the names accepted in requests.
    LORA_PATHS = {
        "hairdetailer": str(REPO_DIR / "lora/hairdetailer.safetensors"),
        "lora_leica": str(REPO_DIR / "lora/lora_leica.safetensors"),
        "epiNoiseoffset_v2": str(REPO_DIR / "lora/epiNoiseoffset_v2.safetensors"),
        "MBHU-TT2FRS": str(REPO_DIR / "lora/MBHU-TT2FRS.safetensors"),
        "ShinyOiledSkin_v20": str(
            REPO_DIR / "lora/ShinyOiledSkin_v20-LoRA.safetensors"
        ),
        "polyhedron_new_skin_v1.1": str(
            REPO_DIR / "lora/polyhedron_new_skin_v1.1.safetensors"
        ),
        "detailed_eye-10": str(REPO_DIR / "lora/detailed_eye-10.safetensors"),
        "add_detail": str(REPO_DIR / "lora/add_detail.safetensors"),
        "MuscleGirl_v1": str(REPO_DIR / "lora/MuscleGirl_v1.safetensors"),
        "flat2": str(REPO_DIR / "lora/flat2.safetensors"),
    }

    # Textual inversion embeddings loaded at startup; their trigger tokens are
    # appended to the negative prompt at inference time.
    TEXTUAL_INVERSION = [
        {
            "weight_name": str(REPO_DIR / "embeddings/EasyNegative.safetensors"),
            "token": "easynegative",
        },
        {
            "weight_name": str(REPO_DIR / "embeddings/badhandv4.pt"),
            "token": "badhandv4",
        },
        {
            "weight_name": str(REPO_DIR / "embeddings/bad-artist-anime.pt"),
            "token": "bad-artist-anime",
        },
        {
            "weight_name": str(REPO_DIR / "embeddings/NegfeetV2.pt"),
            "token": "negfeetv2",
        },
        {
            "weight_name": str(REPO_DIR / "embeddings/ng_deepnegative_v1_75t.pt"),
            "token": "ng_deepnegative_v1_75t",
        },
        {
            "weight_name": str(REPO_DIR / "embeddings/bad-hands-5.pt"),
            "token": "bad-hands-5",
        },
    ]

    def __init__(self, path="."):
        # Per-request bookkeeping for asynchronous inference.
        self.inference_progress = {}  # request_id -> progress percentage
        self.inference_images = {}  # request_id -> latest preview image (base64 PNG)
        self.total_steps = {}  # request_id -> total number of inference steps
        self.inference_in_progress = False

        # Single worker: only one inference can run at a time.
        self.executor = ThreadPoolExecutor(max_workers=1)

        # Load the base model with the long-prompt-weighting community pipeline.
        self.pipe = DiffusionPipeline.from_pretrained(
            path,
            custom_pipeline="lpw_stable_diffusion",
            torch_dtype=torch.float16,
        )
        self.pipe = self.pipe.to(device)

        # DPM++ SDE scheduler with Karras sigmas.
        self.pipe.scheduler = DPMSolverMultistepScheduler.from_config(
            self.pipe.scheduler.config,
            use_karras_sigmas=True,
            algorithm_type="sde-dpmsolver++",
        )

        # Disable the safety checker.
        self.pipe.safety_checker = None

        # Disable the built-in progress bar; progress is reported via callbacks instead.
        self.pipe.set_progress_bar_config(disable=True)

        # Load the textual inversion embeddings.
        self.load_embeddings()

        # Memory optimizations.
        self.pipe.enable_xformers_memory_efficient_attention()
        self.pipe.enable_attention_slicing()

    def load_lora(self, pipeline, lora_path, lora_weight=0.5):
        """Merge a kohya-style LoRA (safetensors) into the pipeline weights."""
        state_dict = load_file(lora_path)
        LORA_PREFIX_UNET = "lora_unet"
        LORA_PREFIX_TEXT_ENCODER = "lora_te"

        alpha = lora_weight
        visited = []

        for key in state_dict:
            state_dict[key] = state_dict[key].to(device)

        for key in state_dict:
            # Skip alpha entries and keys already handled as part of an up/down pair.
            if ".alpha" in key or key in visited:
                continue

            # Resolve the target module inside the text encoder or the UNet.
            if "text" in key:
                layer_infos = (
                    key.split(".")[0]
                    .split(LORA_PREFIX_TEXT_ENCODER + "_")[-1]
                    .split("_")
                )
                curr_layer = pipeline.text_encoder
            else:
                layer_infos = (
                    key.split(".")[0].split(LORA_PREFIX_UNET + "_")[-1].split("_")
                )
                curr_layer = pipeline.unet

            # Walk the module tree; submodule names containing underscores are
            # rebuilt piece by piece until an attribute lookup succeeds.
            temp_name = layer_infos.pop(0)
            while len(layer_infos) > -1:
                try:
                    curr_layer = curr_layer.__getattr__(temp_name)
                    if len(layer_infos) > 0:
                        temp_name = layer_infos.pop(0)
                    elif len(layer_infos) == 0:
                        break
                except Exception:
                    if len(temp_name) > 0:
                        temp_name += "_" + layer_infos.pop(0)
                    else:
                        temp_name = layer_infos.pop(0)

            # Pair each lora_down weight with its matching lora_up weight.
            pair_keys = []
            if "lora_down" in key:
                pair_keys.append(key.replace("lora_down", "lora_up"))
                pair_keys.append(key)
            else:
                pair_keys.append(key)
                pair_keys.append(key.replace("lora_up", "lora_down"))

            # Merge: W += alpha * (up @ down), handling conv (4D) and linear (2D) weights.
            if len(state_dict[pair_keys[0]].shape) == 4:
                weight_up = (
                    state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
                )
                weight_down = (
                    state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
                )
                curr_layer.weight.data += alpha * torch.mm(
                    weight_up, weight_down
                ).unsqueeze(2).unsqueeze(3)
            else:
                weight_up = state_dict[pair_keys[0]].to(torch.float32)
                weight_down = state_dict[pair_keys[1]].to(torch.float32)
                curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

            # Mark both keys of the pair as processed.
            for item in pair_keys:
                visited.append(item)

        return pipeline

    def load_embeddings(self):
        """Load the textual inversion embeddings; their tokens help avoid bad prompts."""
        for model in EndpointHandler.TEXTUAL_INVERSION:
            self.pipe.load_textual_inversion(
                ".", weight_name=model["weight_name"], token=model["token"]
            )

    def load_selected_loras(self, selections):
        """Merge the selected LoRA models into the pipeline."""
        for model_name, weight in selections:
            lora_path = EndpointHandler.LORA_PATHS[model_name]
            self.pipe = self.load_lora(
                pipeline=self.pipe, lora_path=lora_path, lora_weight=weight
            )
        return self.pipe
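
    # Illustrative sketch (not from the original code): `selections` is expected to
    # be an iterable of (name, weight) pairs whose names are keys of LORA_PATHS;
    # the weights below are made-up example values.
    #
    #     handler.load_selected_loras([("add_detail", 0.6), ("detailed_eye-10", 0.4)])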

    def __call__(self, data: Any) -> Dict:
        """Handle incoming requests.

        Supported actions:
          - "check_progress": return the progress and latest preview for a request.
          - "inference": start a new asynchronous inference.
        """

        action = data.get("action", None)
        request_id = data.get("request_id")

        if not request_id:
            return {"flag": "error", "message": "Missing request_id."}

        if action == "check_progress":
            return self.check_progress(request_id)

        elif action == "inference":
            # Only one inference may run at a time.
            if self.inference_in_progress:
                return {
                    "flag": "error",
                    "message": "Another inference is already in progress. Please wait.",
                }

            # Reset any previous state for this request and mark inference as running.
            self.clean_request_data(request_id)
            self.inference_in_progress = True
            self.inference_progress[request_id] = 0
            self.inference_images[request_id] = None

            # Run the inference in the background so this call can return immediately.
            self.executor.submit(self.start_inference, data)

            return {
                "flag": "success",
                "message": "Inference started",
                "request_id": request_id,
            }

        else:
            return {"flag": "error", "message": f"Unsupported action: {action}"}
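
    # Example request payloads (illustrative values only, not from the original code):
    #
    #     {"action": "inference", "request_id": "abc123",
    #      "prompt": "a portrait photo", "negative_prompt": "blurry",
    #      "width": 512, "height": 768, "num_inference_steps": 30,
    #      "guidance_scale": 7.0, "seed": 42, "loras_model": [["add_detail", 0.6]]}
    #
    #     {"action": "check_progress", "request_id": "abc123"}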

    def clean_request_data(self, request_id: str):
        """Clean up the data related to a specific request ID."""
        self.inference_progress.pop(request_id, None)
        self.inference_images.pop(request_id, None)
        self.total_steps.pop(request_id, None)
        self.inference_in_progress = False

    def progress_callback(
        self,
        step: int,
        timestep: int,
        latents: Any,
        request_id: str,
        status: str,
    ):
        """Store progress and the latest preview image for the given request."""
        img_str = None
        try:
            if status == "progress":
                # Decode the intermediate latents into a preview image.
                img_data = self.pipe.decode_latents(latents)
                img_data = (img_data.squeeze() * 255).astype(np.uint8)
                img = Image.fromarray(img_data, "RGB")
            else:
                # On completion the final PIL image is passed in directly.
                img = latents

            # Encode the image as a base64 PNG string.
            buffered = BytesIO()
            img.save(buffered, format="PNG")
            img_str = base64.b64encode(buffered.getvalue()).decode()

        except Exception as e:
            print(f"Error: {e}")

        progress_percentage = (step / self.total_steps[request_id]) * 100

        self.inference_progress[request_id] = progress_percentage
        # Keep the previous preview if decoding or encoding failed.
        if img_str is not None:
            self.inference_images[request_id] = img_str

    def check_progress(self, request_id: str) -> Dict[str, Union[str, float]]:
        progress = self.inference_progress.get(request_id, 0)
        latest_image = self.inference_images.get(request_id, None)

        if progress >= 100:
            status = "complete"
        else:
            status = "in-progress"

        return {
            "flag": "success",
            "status": status,
            "progress": int(progress),
            "image": latest_image,
        }

    def start_inference(self, data: Dict) -> Dict:
        """Start a new inference."""

        # Validate the request payload.
        required_fields = [
            "prompt",
            "negative_prompt",
            "width",
            "num_inference_steps",
            "height",
            "guidance_scale",
            "request_id",
        ]

        missing_fields = [field for field in required_fields if field not in data]

        if missing_fields:
            return {
                "flag": "error",
                "message": f"Missing fields: {', '.join(missing_fields)}",
            }

        prompt = data["prompt"]
        negative_prompt = data["negative_prompt"]
        loras_model = data.get("loras_model", None)
        seed = data.get("seed", None)
        width = data["width"]
        num_inference_steps = data["num_inference_steps"]
        height = data["height"]
        guidance_scale = data["guidance_scale"]
        request_id = data["request_id"]

        # Record the total number of steps so progress can be reported as a percentage.
        self.total_steps[request_id] = num_inference_steps

        # Always append the textual inversion trigger tokens to the negative prompt.
        forced_negative = (
            negative_prompt
            + """, easynegative, badhandv4, bad-artist-anime, negfeetv2, ng_deepnegative_v1_75t, bad-hands-5, """
        )

        # Use a fixed seed for reproducibility when one is provided.
        generator = torch.Generator(device="cuda").manual_seed(seed) if seed else None

        # Merge any requested LoRA weights into the pipeline.
        if loras_model:
            self.pipe = self.load_selected_loras(loras_model)

        try:
            with autocast(device.type):
                image = self.pipe.text2img(
                    prompt=prompt,
                    guidance_scale=guidance_scale,
                    num_inference_steps=num_inference_steps,
                    height=height,
                    width=width,
                    negative_prompt=forced_negative,
                    generator=generator,
                    max_embeddings_multiples=5,
                    callback=lambda step, timestep, latents: self.progress_callback(
                        step, timestep, latents, request_id, "progress"
                    ),
                    callback_steps=8,
                ).images[0]

            # Report the final image and mark the request as 100% complete.
            self.progress_callback(
                num_inference_steps, 0, image, request_id, "complete"
            )

        except Exception as e:
            return {"flag": "error", "message": str(e)}

        finally:
            # Allow the next inference request to start.
            self.inference_in_progress = False
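
# ---------------------------------------------------------------------------
# Minimal local usage sketch (not part of the endpoint contract). It assumes the
# Stable Diffusion weights live in this repository's root directory and that a
# CUDA GPU is available; the prompt, sizes, and "demo" request_id are made-up
# example values.
if __name__ == "__main__":
    import time

    handler = EndpointHandler(path=str(REPO_DIR))

    # Start an asynchronous inference.
    response = handler(
        {
            "action": "inference",
            "request_id": "demo",
            "prompt": "a scenic mountain landscape at sunset",
            "negative_prompt": "lowres, blurry",
            "width": 512,
            "height": 512,
            "num_inference_steps": 24,
            "guidance_scale": 7.0,
        }
    )
    print(response)

    # Poll until the background inference reports completion.
    while True:
        status = handler({"action": "check_progress", "request_id": "demo"})
        print(status["status"], status["progress"])
        if status["status"] == "complete":
            break
        time.sleep(2)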
|