from email.policy import default
from json import encoder
import gradio as gr
import spaces
import numpy as np
import torch
import requests
import random
import os
import sys
import pickle
from PIL import Image
from tqdm.auto import tqdm
from datetime import datetime
import torch.nn as nn
import torch.nn.functional as F

class AttnProcessor(nn.Module):
    r"""
    Default processor for performing attention-related computations.
    """
    def __init__(
        self,
        hidden_size=None,
        cross_attention_dim=None,
    ):
        super().__init__()

    def __call__(
        self,
        attn,
        hidden_states,
        encoder_hidden_states=None,
        attention_mask=None,
        temb=None,
    ):
        residual = hidden_states

        if attn.spatial_norm is not None:
            hidden_states = attn.spatial_norm(hidden_states, temb)

        input_ndim = hidden_states.ndim

        if input_ndim == 4:
            batch_size, channel, height, width = hidden_states.shape
            hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)

        batch_size, sequence_length, _ = (
            hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
        )
        attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)

        if attn.group_norm is not None:
            hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)

        query = attn.to_q(hidden_states)

        if encoder_hidden_states is None:
            encoder_hidden_states = hidden_states
        elif attn.norm_cross:
            encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)

        key = attn.to_k(encoder_hidden_states)
        value = attn.to_v(encoder_hidden_states)

        query = attn.head_to_batch_dim(query)
        key = attn.head_to_batch_dim(key)
        value = attn.head_to_batch_dim(value)

        attention_probs = attn.get_attention_scores(query, key, attention_mask)
        hidden_states = torch.bmm(attention_probs, value)
        hidden_states = attn.batch_to_head_dim(hidden_states)

        # linear proj
        hidden_states = attn.to_out[0](hidden_states)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)

        if input_ndim == 4:
            hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)

        if attn.residual_connection:
            hidden_states = hidden_states + residual

        hidden_states = hidden_states / attn.rescale_output_factor

        return hidden_states

import diffusers
from diffusers import StableDiffusionXLPipeline
from diffusers import DDIMScheduler
import torch.nn.functional as F
from transformers.models.clip.configuration_clip import CLIPVisionConfig

def cal_attn_mask(total_length, id_length, sa16, sa32, sa64, device="cuda", dtype=torch.float16):
    bool_matrix256 = torch.rand((1, total_length * 256), device=device, dtype=dtype) < sa16
    bool_matrix1024 = torch.rand((1, total_length * 1024), device=device, dtype=dtype) < sa32
    bool_matrix4096 = torch.rand((1, total_length * 4096), device=device, dtype=dtype) < sa64
    bool_matrix256 = bool_matrix256.repeat(total_length, 1)
    bool_matrix1024 = bool_matrix1024.repeat(total_length, 1)
    bool_matrix4096 = bool_matrix4096.repeat(total_length, 1)
    for i in range(total_length):
        bool_matrix256[i:i+1, id_length*256:] = False
        bool_matrix1024[i:i+1, id_length*1024:] = False
        bool_matrix4096[i:i+1, id_length*4096:] = False
        bool_matrix256[i:i+1, i*256:(i+1)*256] = True
        bool_matrix1024[i:i+1, i*1024:(i+1)*1024] = True
        bool_matrix4096[i:i+1, i*4096:(i+1)*4096] = True
    mask256 = bool_matrix256.unsqueeze(1).repeat(1, 256, 1).reshape(-1, total_length * 256)
    mask1024 = bool_matrix1024.unsqueeze(1).repeat(1, 1024, 1).reshape(-1, total_length * 1024)
    mask4096 = bool_matrix4096.unsqueeze(1).repeat(1, 4096, 1).reshape(-1, total_length * 4096)
    return mask256, mask1024, mask4096
def cal_attn_mask_xl(total_length, id_length, sa32, sa64, height, width, device="cuda", dtype=torch.float16):
    nums_1024 = (height // 32) * (width // 32)
    nums_4096 = (height // 16) * (width // 16)
    bool_matrix1024 = torch.rand((1, total_length * nums_1024), device=device, dtype=dtype) < sa32
    bool_matrix4096 = torch.rand((1, total_length * nums_4096), device=device, dtype=dtype) < sa64
    bool_matrix1024 = bool_matrix1024.repeat(total_length, 1)
    bool_matrix4096 = bool_matrix4096.repeat(total_length, 1)
    for i in range(total_length):
        bool_matrix1024[i:i+1, id_length*nums_1024:] = False
        bool_matrix4096[i:i+1, id_length*nums_4096:] = False
        bool_matrix1024[i:i+1, i*nums_1024:(i+1)*nums_1024] = True
        bool_matrix4096[i:i+1, i*nums_4096:(i+1)*nums_4096] = True
    mask1024 = bool_matrix1024.unsqueeze(1).repeat(1, nums_1024, 1).reshape(-1, total_length * nums_1024)
    mask4096 = bool_matrix4096.unsqueeze(1).repeat(1, nums_4096, 1).reshape(-1, total_length * nums_4096)
    return mask1024, mask4096
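# Worked example (illustrative only, not part of the original code): with
# height = width = 768, total_length = 5 and id_length = 4, the token counts are
# nums_1024 = (768 // 32) ** 2 = 576 and nums_4096 = (768 // 16) ** 2 = 2304, so
# cal_attn_mask_xl returns boolean masks of shape
#   mask1024: (5 * 576, 5 * 576)   and   mask4096: (5 * 2304, 5 * 2304).
# Each query row of frame i always attends to frame i's own tokens, attends to the
# tokens of the first `id_length` identity frames with probability sa32 / sa64,
# and never attends to the remaining (non-identity) frames.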
""" lines = [] # Store the lines of text here words = text.split() # Start building lines of text, and wrap when necessary current_line = "" for word in words: test_line = f"{current_line} {word}".strip() # Check the width of the line with this word added width, _ = draw.textsize(test_line, font=font) if width <= max_width: # If it fits, add this word to the current line current_line = test_line else: # If not, store the line and start a new one lines.append(current_line) current_line = word # Add the last line lines.append(current_line) return lines def add_caption(image, text, position = "bottom-mid", font = None, text_color= 'black', bg_color = (255, 255, 255) , bg_opacity = 200): if text == "": return image image = image.convert("RGBA") draw = ImageDraw.Draw(image) width, height = image.size lines = process_mulline_text(draw,text,font,width) text_positions = [] maxwidth = 0 for ind, line in enumerate(lines[::-1]): text_width, text_height = draw.textsize(line, font=font) if position == 'bottom-right': text_position = (width - text_width - 10, height - (text_height + 20)) elif position == 'bottom-left': text_position = (10, height - (text_height + 20)) elif position == 'bottom-mid': text_position = ((width - text_width) // 2, height - (text_height + 20) ) # 居中文本 height = text_position[1] maxwidth = max(maxwidth,text_width) text_positions.append(text_position) rectpos = (width - maxwidth) // 2 rectangle_position = [rectpos - 5, text_positions[-1][1] - 5, rectpos + maxwidth + 5, text_positions[0][1] + text_height + 5] image_with_transparency = Image.new('RGBA', image.size) draw_with_transparency = ImageDraw.Draw(image_with_transparency) draw_with_transparency.rectangle(rectangle_position, fill=bg_color + (bg_opacity,)) image.paste(Image.alpha_composite(image.convert('RGBA'), image_with_transparency)) print(ind,text_position) draw = ImageDraw.Draw(image) for ind, line in enumerate(lines[::-1]): text_position = text_positions[ind] draw.text(text_position, line, fill=text_color, font=font) return image.convert('RGB') def get_comic(images,types = "4panel",captions = [],font = None,pad_image = None): if pad_image == None: pad_image = Image.open("./images/pad_images.png") if font == None: font = ImageFont.truetype("./fonts/Inkfree.ttf", int(30 * images[0].size[1] / 1024)) if types == "No typesetting (default)": return images elif types == "Four Pannel": return get_comic_4panel(images,captions,font,pad_image) else: # "Classic Comic Style" return get_comic_classical(images,captions,font,pad_image) def get_caption_group(images_groups,captions = []): caption_groups = [] for i in range(len(images_groups)): length = len(images_groups[i]) caption_groups.append(captions[:length]) captions = captions[length:] if len(caption_groups[-1]) < len(images_groups[-1]): caption_groups[-1] = caption_groups[-1] + [""] * (len(images_groups[-1]) - len(caption_groups[-1])) return caption_groups class MLP(nn.Module): def __init__(self, in_dim, out_dim, hidden_dim, use_residual=True): super().__init__() if use_residual: assert in_dim == out_dim self.layernorm = nn.LayerNorm(in_dim) self.fc1 = nn.Linear(in_dim, hidden_dim) self.fc2 = nn.Linear(hidden_dim, out_dim) self.use_residual = use_residual self.act_fn = nn.GELU() def forward(self, x): residual = x x = self.layernorm(x) x = self.fc1(x) x = self.act_fn(x) x = self.fc2(x) if self.use_residual: x = x + residual return x def get_comic_classical(images,captions = None,font = None,pad_image = None): if pad_image == None: raise ValueError("pad_image is None") images = 
    images = [add_white_border(image) for image in images]
    pad_image = pad_image.resize(images[0].size, Image.ANTIALIAS)
    images_groups = distribute_images2(images, pad_image)
    print(images_groups)
    if captions != None:
        captions_groups = get_caption_group(images_groups, captions)
    # print(images_groups)
    row_images = []
    for ind, img_group in enumerate(images_groups):
        row_images.append(get_row_image2(img_group, captions=captions_groups[ind] if captions != None else None, font=font))

    return [combine_images_vertically_with_resize(row_images)]

class FuseModule(nn.Module):
    def __init__(self, embed_dim):
        super().__init__()
        self.mlp1 = MLP(embed_dim * 2, embed_dim, embed_dim, use_residual=False)
        self.mlp2 = MLP(embed_dim, embed_dim, embed_dim, use_residual=True)
        self.layer_norm = nn.LayerNorm(embed_dim)

    def fuse_fn(self, prompt_embeds, id_embeds):
        stacked_id_embeds = torch.cat([prompt_embeds, id_embeds], dim=-1)
        stacked_id_embeds = self.mlp1(stacked_id_embeds) + prompt_embeds
        stacked_id_embeds = self.mlp2(stacked_id_embeds)
        stacked_id_embeds = self.layer_norm(stacked_id_embeds)
        return stacked_id_embeds

    def forward(
        self,
        prompt_embeds,
        id_embeds,
        class_tokens_mask,
    ) -> torch.Tensor:
        # id_embeds shape: [b, max_num_inputs, 1, 2048]
        id_embeds = id_embeds.to(prompt_embeds.dtype)
        num_inputs = class_tokens_mask.sum().unsqueeze(0)  # TODO: check for training case
        batch_size, max_num_inputs = id_embeds.shape[:2]
        # seq_length: 77
        seq_length = prompt_embeds.shape[1]
        # flat_id_embeds shape: [b*max_num_inputs, 1, 2048]
        flat_id_embeds = id_embeds.view(
            -1, id_embeds.shape[-2], id_embeds.shape[-1]
        )
        # valid_id_mask [b*max_num_inputs]
        valid_id_mask = (
            torch.arange(max_num_inputs, device=flat_id_embeds.device)[None, :]
            < num_inputs[:, None]
        )
        valid_id_embeds = flat_id_embeds[valid_id_mask.flatten()]

        prompt_embeds = prompt_embeds.view(-1, prompt_embeds.shape[-1])
        class_tokens_mask = class_tokens_mask.view(-1)
        valid_id_embeds = valid_id_embeds.view(-1, valid_id_embeds.shape[-1])
        # slice out the image token embeddings
        image_token_embeds = prompt_embeds[class_tokens_mask]
        stacked_id_embeds = self.fuse_fn(image_token_embeds, valid_id_embeds)
        assert class_tokens_mask.sum() == stacked_id_embeds.shape[0], f"{class_tokens_mask.sum()} != {stacked_id_embeds.shape[0]}"
        prompt_embeds.masked_scatter_(class_tokens_mask[:, None], stacked_id_embeds.to(prompt_embeds.dtype))
        updated_prompt_embeds = prompt_embeds.view(batch_size, seq_length, -1)
        return updated_prompt_embeds
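# Shape walk-through (illustrative only; assumes a single prompt, i.e. batch size 1):
# with prompt_embeds of shape [1, 77, 2048], id_embeds of shape [1, 2, 1, 2048] and a
# class_tokens_mask of shape [1, 77] that is True at the two trigger-token positions,
# FuseModule gathers the two valid id embeddings, concatenates each with the matching
# prompt-token embedding ([2, 4096]), projects back to 2048 through mlp1 + residual,
# mlp2 and LayerNorm, then scatters the fused vectors back into the masked positions,
# returning updated prompt embeddings of shape [1, 77, 2048].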
def get_comic_4panel(images, captions=[], font=None, pad_image=None):
    if pad_image == None:
        raise ValueError("pad_image is None")
    pad_image = pad_image.resize(images[0].size, Image.ANTIALIAS)
    images = [add_white_border(image) for image in images]
    assert len(captions) == len(images)
    for i, caption in enumerate(captions):
        images[i] = add_caption(images[i], caption, font=font)
    images_nums = len(images)
    pad_nums = int((4 - images_nums % 4) % 4)
    images = images + [pad_image for _ in range(pad_nums)]
    comics = []
    assert len(images) % 4 == 0
    for i in range(len(images) // 4):
        comics.append(combine_images_vertically_with_resize([combine_images_horizontally(images[i*4:i*4+2]), combine_images_horizontally(images[i*4+2:i*4+4])]))
    return comics

def get_row_image(images):
    row_image_arr = []
    if len(images) > 3:
        stack_img_nums = (len(images) - 2) // 2
    else:
        stack_img_nums = 0
    while (len(images) > 0):
        if stack_img_nums <= 0:
            row_image_arr.append(images[0])
            images = images[1:]
        elif len(images) > stack_img_nums * 2:
            if get_random_bool():
                row_image_arr.append(concat_images_vertically_and_scale(images[:2]))
                images = images[2:]
                stack_img_nums -= 1
            else:
                row_image_arr.append(images[0])
                images = images[1:]
        else:
            row_image_arr.append(concat_images_vertically_and_scale(images[:2]))
            images = images[2:]
            stack_img_nums -= 1
    return combine_images_horizontally(row_image_arr)

def get_row_image2(images, captions=None, font=None):
    row_image_arr = []
    if len(images) == 6:
        sequence_list = [1, 1, 2, 2]
    elif len(images) == 4:
        sequence_list = [1, 1, 2]
    else:
        raise ValueError("images nums is not 4 or 6 found", len(images))
    random.shuffle(sequence_list)
    index = 0
    for length in sequence_list:
        if length == 1:
            if captions != None:
                images_tmp = add_caption(images[0], text=captions[index], font=font)
            else:
                images_tmp = images[0]
            row_image_arr.append(images_tmp)
            images = images[1:]
            index += 1
        elif length == 2:
            row_image_arr.append(concat_images_vertically_and_scale(images[:2]))
            images = images[2:]
            index += 2

    return combine_images_horizontally(row_image_arr)

VISION_CONFIG_DICT = {
    "hidden_size": 1024,
    "intermediate_size": 4096,
    "num_attention_heads": 16,
    "num_hidden_layers": 24,
    "patch_size": 14,
    "projection_dim": 768
}

def concat_images_vertically_and_scale(images, scale_factor=2):
    # make sure all images have the same width
    widths = [img.width for img in images]
    if not all(width == widths[0] for width in widths):
        raise ValueError('All images must have the same width.')

    # total height of the stacked images
    total_height = sum(img.height for img in images)

    # new image with the same width and the combined height of all images
    max_width = max(widths)
    concatenated_image = Image.new('RGB', (max_width, total_height))

    # paste the images vertically
    current_height = 0
    for img in images:
        concatenated_image.paste(img, (0, current_height))
        current_height += img.height

    # scale the result down to 1/scale_factor of its size
    new_height = concatenated_image.height // scale_factor
    new_width = concatenated_image.width // scale_factor
    resized_image = concatenated_image.resize((new_width, new_height), Image.ANTIALIAS)

    return resized_image

def combine_images_horizontally(images):
    # get the width and height of each image
    widths, heights = zip(*(i.size for i in images))

    # total width and maximum height
    total_width = sum(widths)
    max_height = max(heights)

    # new blank image used for the concatenation
    new_im = Image.new('RGB', (total_width, max_height))

    # paste the images side by side
    x_offset = 0
    for im in images:
        new_im.paste(im, (x_offset, 0))
        x_offset += im.width

    return new_im

def combine_images_vertically_with_resize(images):
    # get the width and height of every image
    widths, heights = zip(*(i.size for i in images))

    # the new image width is the smallest width among all images
    min_width = min(widths)

    # resize the images to the same width while keeping the aspect ratio
    resized_images = []
    for img in images:
        # new height that preserves the aspect ratio
        new_height = int(min_width * img.height / img.width)
        # resize the image
        resized_img = img.resize((min_width, new_height), Image.ANTIALIAS)
        resized_images.append(resized_img)

    # total height of all resized images
    total_height = sum(img.height for img in resized_images)

    # create a new image that is wide and tall enough
    new_im = Image.new('RGB', (min_width, total_height))

    # paste the images vertically
    y_offset = 0
    for im in resized_images:
        new_im.paste(im, (0, y_offset))
        y_offset += im.height

    return new_im

def distribute_images2(images, pad_image):
    groups = []
    remaining = len(images)
    if len(images) <= 8:
        group_sizes = [4]
    else:
        group_sizes = [4, 6]
    size_index = 0
    while remaining > 0:
        size = group_sizes[size_index % len(group_sizes)]
        if remaining < size and remaining < min(group_sizes):
            size = min(group_sizes)
        if remaining > size:
            new_group = images[-remaining: -remaining + size]
        else:
            new_group = images[-remaining:]
        groups.append(new_group)
        size_index += 1
        remaining -= size
        print(remaining, groups)
    groups[-1] = groups[-1] + [pad_image for _ in range(-remaining)]

    return groups
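# Worked example (illustrative only): distribute_images2 splits the panel list into
# groups of 4 (and 6 once there are more than 8 images), then pads the last group:
#   7 images  -> groups of [4, 3]; remaining ends at -1, so one pad_image is appended
#                and the layout becomes [4, 4].
#   10 images -> groups of [4, 6] with no padding needed.
# distribute_images below is a related variant that also allows groups of 3 and 2.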
def distribute_images(images, group_sizes=(4, 3, 2)):
    groups = []
    remaining = len(images)

    while remaining > 0:
        # prefer the largest group (4 images) first, then 3, and handle 2 last
        for size in sorted(group_sizes, reverse=True):
            # start a new group if enough images remain for this size,
            # or if this is the first iteration (remaining equals the total count)
            if remaining >= size or remaining == len(images):
                if remaining > size:
                    new_group = images[-remaining: -remaining + size]
                else:
                    new_group = images[-remaining:]
                groups.append(new_group)
                remaining -= size
                break
            # if fewer images remain than the smallest group size (2) and at least one
            # group already exists, append the leftovers to the last group
            elif remaining < min(group_sizes) and groups:
                groups[-1].extend(images[-remaining:])
                remaining = 0

    return groups

def create_binary_matrix(img_arr, target_color):
    mask = np.all(img_arr == target_color, axis=-1)
    binary_matrix = mask.astype(int)
    return binary_matrix

def preprocess_mask(mask_, h, w, device):
    mask = np.array(mask_)
    mask = mask.astype(np.float32)
    mask = mask[None, None]
    mask[mask < 0.5] = 0
    mask[mask >= 0.5] = 1
    mask = torch.from_numpy(mask).to(device)
    mask = torch.nn.functional.interpolate(mask, size=(h, w), mode='nearest')
    return mask

def process_sketch(canvas_data):
    binary_matrixes = []
    base64_img = canvas_data['image']
    image_data = base64.b64decode(base64_img.split(',')[1])
    image = Image.open(BytesIO(image_data)).convert("RGB")
    im2arr = np.array(image)
    colors = [tuple(map(int, rgb[4:-1].split(','))) for rgb in canvas_data['colors']]
    colors_fixed = []

    r, g, b = 255, 255, 255
    binary_matrix = create_binary_matrix(im2arr, (r, g, b))
    binary_matrixes.append(binary_matrix)
    binary_matrix_ = np.repeat(np.expand_dims(binary_matrix, axis=(-1)), 3, axis=(-1))
    colored_map = binary_matrix_ * (r, g, b) + (1 - binary_matrix_) * (50, 50, 50)
    colors_fixed.append(gr.update(value=colored_map.astype(np.uint8)))

    for color in colors:
        r, g, b = color
        if any(c != 255 for c in (r, g, b)):
            binary_matrix = create_binary_matrix(im2arr, (r, g, b))
            binary_matrixes.append(binary_matrix)
            binary_matrix_ = np.repeat(np.expand_dims(binary_matrix, axis=(-1)), 3, axis=(-1))
            colored_map = binary_matrix_ * (r, g, b) + (1 - binary_matrix_) * (50, 50, 50)
            colors_fixed.append(gr.update(value=colored_map.astype(np.uint8)))

    visibilities = []
    colors = []
    for n in range(MAX_COLORS):
        visibilities.append(gr.update(visible=False))
        colors.append(gr.update())
    for n in range(len(colors_fixed)):
        visibilities[n] = gr.update(visible=True)
        colors[n] = colors_fixed[n]

    return [gr.update(visible=True), binary_matrixes, *visibilities, *colors]

def process_prompts(binary_matrixes, *seg_prompts):
    return [gr.update(visible=True), gr.update(value=' , '.join(seg_prompts[:len(binary_matrixes)]))]

def process_example(layout_path, all_prompts, seed_):
    all_prompts = all_prompts.split('***')

    binary_matrixes = []
    colors_fixed = []

    im2arr = np.array(Image.open(layout_path))[:, :, :3]
    unique, counts = np.unique(np.reshape(im2arr, (-1, 3)), axis=0, return_counts=True)
    sorted_idx = np.argsort(-counts)

    binary_matrix = create_binary_matrix(im2arr, (0, 0, 0))
    binary_matrixes.append(binary_matrix)
    binary_matrix_ = np.repeat(np.expand_dims(binary_matrix, axis=(-1)), 3, axis=(-1))
    colored_map = binary_matrix_ * (255, 255, 255) + (1 - binary_matrix_) * (50, 50, 50)
    colors_fixed.append(gr.update(value=colored_map.astype(np.uint8)))

    for i in range(len(all_prompts) - 1):
        r, g, b = unique[sorted_idx[i]]
        if any(c != 255 for c in (r, g, b)) and any(c != 0 for c in (r, g, b)):
            binary_matrix = create_binary_matrix(im2arr, (r, g, b))
            binary_matrixes.append(binary_matrix)
            binary_matrix_ = np.repeat(np.expand_dims(binary_matrix, axis=(-1)), 3, axis=(-1))
            colored_map = binary_matrix_ * (r, g, b) + (1 - binary_matrix_) * (50, 50, 50)
            colors_fixed.append(gr.update(value=colored_map.astype(np.uint8)))

    visibilities = []
    colors = []
    prompts = []
    for n in range(MAX_COLORS):
        visibilities.append(gr.update(visible=False))
        colors.append(gr.update())
        prompts.append(gr.update())
    for n in range(len(colors_fixed)):
        visibilities[n] = gr.update(visible=True)
        colors[n] = colors_fixed[n]
        prompts[n] = all_prompts[n + 1]

    return [gr.update(visible=True), binary_matrixes, *visibilities, *colors, *prompts,
            gr.update(visible=True), gr.update(value=all_prompts[0]), int(seed_)]

style_list = [
    {
        "name": "(No style)",
        "prompt": "{prompt}",
        "negative_prompt": "",
    },
    {
        "name": "Japanese Anime",
        "prompt": "anime artwork illustrating {prompt}. created by japanese anime studio. highly emotional. best quality, high resolution",
        "negative_prompt": "low quality, low resolution"
    },
    {
        "name": "Cinematic",
        "prompt": "cinematic still {prompt} . emotional, harmonious, vignette, highly detailed, high budget, bokeh, cinemascope, moody, epic, gorgeous, film grain, grainy",
        "negative_prompt": "anime, cartoon, graphic, text, painting, crayon, graphite, abstract, glitch, deformed, mutated, ugly, disfigured",
    },
    {
        "name": "Disney Charactor",
        "prompt": "A Pixar animation character of {prompt} . pixar-style, studio anime, Disney, high-quality",
        "negative_prompt": "lowres, bad anatomy, bad hands, text, bad eyes, bad arms, bad legs, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, blurry, grayscale, noisy, sloppy, messy, grainy, highly detailed, ultra textured, photo",
    },
    {
        "name": "Photographic",
        "prompt": "cinematic photo {prompt} . 35mm photograph, film, bokeh, professional, 4k, highly detailed",
        "negative_prompt": "drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly",
    },
    {
        "name": "Comic book",
        "prompt": "comic {prompt} . graphic illustration, comic art, graphic novel art, vibrant, highly detailed",
        "negative_prompt": "photograph, deformed, glitch, noisy, realistic, stock photo",
    },
    {
        "name": "Line art",
        "prompt": "line art drawing {prompt} . professional, sleek, modern, minimalist, graphic, line art, vector graphics",
        "negative_prompt": "anime, photorealistic, 35mm film, deformed, glitch, blurry, noisy, off-center, deformed, cross-eyed, closed eyes, bad anatomy, ugly, disfigured, mutated, realism, realistic, impressionism, expressionism, oil, acrylic",
    }
]

styles = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in style_list}
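# Illustrative sketch (not part of the original app): each entry in `styles` maps a
# style name to a (positive template, negative prompt) pair, and the "{prompt}"
# placeholder is meant to be filled with the user's text. The hypothetical helper
# below shows the intended substitution; the real app applies the templates elsewhere.
def _example_apply_style(style_name: str, prompt: str, negative: str = ""):
    positive_template, negative_template = styles[style_name]
    return positive_template.replace("{prompt}", prompt), (negative_template + " " + negative).strip()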

image_encoder_path = "./data/models/ip_adapter/sdxl_models/image_encoder"
ip_ckpt = "./data/models/ip_adapter/sdxl_models/ip-adapter_sdxl_vit-h.bin"
os.environ["no_proxy"] = "localhost,127.0.0.1,::1"
STYLE_NAMES = list(styles.keys())
DEFAULT_STYLE_NAME = "Japanese Anime"
global models_dict
use_va = True
models_dict = {
    # "Juggernaut": "RunDiffusion/Juggernaut-XL-v8",
    "RealVision": "SG161222/RealVisXL_V4.0",
    # "SDXL": "stabilityai/stable-diffusion-xl-base-1.0",
    "Unstable": "stablediffusionapi/sdxl-unstable-diffusers-y"
}
photomaker_path = hf_hub_download(repo_id="TencentARC/PhotoMaker", filename="photomaker-v1.bin", repo_type="model")
MAX_SEED = np.iinfo(np.int32).max

def setup_seed(seed):
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    random.seed(seed)
    torch.backends.cudnn.deterministic = True

def set_text_unfinished():
    return gr.update(visible=True, value="(Not Finished) Generating ··· The intermediate results will be shown.")

def set_text_finished():
    return gr.update(visible=True, value="Generation Finished")

#################################################
def get_image_path_list(folder_name):
    image_basename_list = os.listdir(folder_name)
    image_path_list = sorted([os.path.join(folder_name, basename) for basename in image_basename_list])
    return image_path_list

#################################################
class SpatialAttnProcessor2_0(torch.nn.Module):
    r"""
    Attention processor for IP-Adapter for PyTorch 2.0.
    Args:
        hidden_size (`int`):
            The hidden size of the attention layer.
        cross_attention_dim (`int`):
            The number of channels in the `encoder_hidden_states`.
        text_context_len (`int`, defaults to 77):
            The context length of the text features.
        scale (`float`, defaults to 1.0):
            the weight scale of image prompt.
    """

    def __init__(self, hidden_size=None, cross_attention_dim=None, id_length=4, device="cuda", dtype=torch.float16):
        super().__init__()
        if not hasattr(F, "scaled_dot_product_attention"):
            raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.")
        self.device = device
        self.dtype = dtype
        self.hidden_size = hidden_size
        self.cross_attention_dim = cross_attention_dim
        self.total_length = id_length + 1
        self.id_length = id_length
        self.id_bank = {}

    def __call__(
        self,
        attn,
        hidden_states,
        encoder_hidden_states=None,
        attention_mask=None,
        temb=None):
        # un_cond_hidden_states, cond_hidden_states = hidden_states.chunk(2)
        # un_cond_hidden_states = self.__call2__(attn, un_cond_hidden_states,encoder_hidden_states,attention_mask,temb)
        global total_count, attn_count, cur_step, mask1024, mask4096
        global sa32, sa64
        global write
        global height, width
        global num_steps
        if write:
            # print(f"write:{cur_step}")
            self.id_bank[cur_step] = [hidden_states[:self.id_length], hidden_states[self.id_length:]]
        else:
            encoder_hidden_states = torch.cat((self.id_bank[cur_step][0].to(self.device), hidden_states[:1], self.id_bank[cur_step][1].to(self.device), hidden_states[1:]))
        # randomly decide whether to apply the masked (paired) attention at this step
        if cur_step <= 1:
            hidden_states = self.__call2__(attn, hidden_states, None, attention_mask, temb)
        else:  # 256 1024 4096
            random_number = random.random()
            if cur_step < 0.4 * num_steps:
                rand_num = 0.3
            else:
                rand_num = 0.1
            # print(f"hidden state shape {hidden_states.shape[1]}")
            if random_number > rand_num:
                # print("mask shape", mask1024.shape, mask4096.shape)
                if not write:
                    if hidden_states.shape[1] == (height // 32) * (width // 32):
                        attention_mask = mask1024[mask1024.shape[0] // self.total_length * self.id_length:]
                    else:
                        attention_mask = mask4096[mask4096.shape[0] // self.total_length * self.id_length:]
                else:
                    # print(self.total_length, self.id_length, hidden_states.shape, (height // 32) * (width // 32))
                    if hidden_states.shape[1] == (height // 32) * (width // 32):
                        attention_mask = mask1024[:mask1024.shape[0] // self.total_length * self.id_length, :mask1024.shape[0] // self.total_length * self.id_length]
                    else:
                        attention_mask = mask4096[:mask4096.shape[0] // self.total_length * self.id_length, :mask4096.shape[0] // self.total_length * self.id_length]
                # print(attention_mask.shape)
                # print("before attention", hidden_states.shape, attention_mask.shape, encoder_hidden_states.shape if encoder_hidden_states is not None else "None")
                hidden_states = self.__call1__(attn, hidden_states, encoder_hidden_states, attention_mask, temb)
            else:
                hidden_states = self.__call2__(attn, hidden_states, None, attention_mask, temb)
        attn_count += 1
        if attn_count == total_count:
            attn_count = 0
            cur_step += 1
            mask1024, mask4096 = cal_attn_mask_xl(self.total_length, self.id_length, sa32, sa64, height, width, device=self.device, dtype=self.dtype)
        return hidden_states
    def __call1__(
        self,
        attn,
        hidden_states,
        encoder_hidden_states=None,
        attention_mask=None,
        temb=None,
    ):
        # print("hidden state shape", hidden_states.shape, self.id_length)
        residual = hidden_states
        # if encoder_hidden_states is not None:
        #     raise Exception("not implement")
        if attn.spatial_norm is not None:
            hidden_states = attn.spatial_norm(hidden_states, temb)
        input_ndim = hidden_states.ndim

        if input_ndim == 4:
            total_batch_size, channel, height, width = hidden_states.shape
            hidden_states = hidden_states.view(total_batch_size, channel, height * width).transpose(1, 2)
        total_batch_size, nums_token, channel = hidden_states.shape
        img_nums = total_batch_size // 2
        hidden_states = hidden_states.view(-1, img_nums, nums_token, channel).reshape(-1, img_nums * nums_token, channel)

        batch_size, sequence_length, _ = hidden_states.shape

        if attn.group_norm is not None:
            hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)

        query = attn.to_q(hidden_states)

        if encoder_hidden_states is None:
            encoder_hidden_states = hidden_states  # B, N, C
        else:
            encoder_hidden_states = encoder_hidden_states.view(-1, self.id_length + 1, nums_token, channel).reshape(-1, (self.id_length + 1) * nums_token, channel)

        key = attn.to_k(encoder_hidden_states)
        value = attn.to_v(encoder_hidden_states)

        inner_dim = key.shape[-1]
        head_dim = inner_dim // attn.heads

        query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        # print(key.shape, value.shape, query.shape, attention_mask.shape)
        # the output of sdp = (batch, num_heads, seq_len, head_dim)
        # TODO: add support for attn.scale when we move to Torch 2.1
        # print(query.shape, key.shape, value.shape, attention_mask.shape)
        hidden_states = F.scaled_dot_product_attention(
            query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
        )

        hidden_states = hidden_states.transpose(1, 2).reshape(total_batch_size, -1, attn.heads * head_dim)
        hidden_states = hidden_states.to(query.dtype)

        # linear proj
        hidden_states = attn.to_out[0](hidden_states)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)

        # if input_ndim == 4:
        #     tile_hidden_states = tile_hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)

        # if attn.residual_connection:
        #     tile_hidden_states = tile_hidden_states + residual

        if input_ndim == 4:
            hidden_states = hidden_states.transpose(-1, -2).reshape(total_batch_size, channel, height, width)
        if attn.residual_connection:
            hidden_states = hidden_states + residual
        hidden_states = hidden_states / attn.rescale_output_factor
        # print(hidden_states.shape)
        return hidden_states

    def __call2__(
        self,
        attn,
        hidden_states,
        encoder_hidden_states=None,
        attention_mask=None,
        temb=None):
        residual = hidden_states

        if attn.spatial_norm is not None:
            hidden_states = attn.spatial_norm(hidden_states, temb)

        input_ndim = hidden_states.ndim

        if input_ndim == 4:
            batch_size, channel, height, width = hidden_states.shape
            hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)

        batch_size, sequence_length, channel = (
            hidden_states.shape
        )
        # print(hidden_states.shape)
        if attention_mask is not None:
            attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
            # scaled_dot_product_attention expects attention_mask shape to be
            # (batch, heads, source_length, target_length)
            attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])

        if attn.group_norm is not None:
            hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)

        query = attn.to_q(hidden_states)

        if encoder_hidden_states is None:
            encoder_hidden_states = hidden_states  # B, N, C
        else:
            encoder_hidden_states = encoder_hidden_states.view(-1, self.id_length + 1, sequence_length, channel).reshape(-1, (self.id_length + 1) * sequence_length, channel)

        key = attn.to_k(encoder_hidden_states)
        value = attn.to_v(encoder_hidden_states)

        inner_dim = key.shape[-1]
        head_dim = inner_dim // attn.heads

        query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)

        # the output of sdp = (batch, num_heads, seq_len, head_dim)
        # TODO: add support for attn.scale when we move to Torch 2.1
        hidden_states = F.scaled_dot_product_attention(
            query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
        )

        hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
        hidden_states = hidden_states.to(query.dtype)

        # linear proj
        hidden_states = attn.to_out[0](hidden_states)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)

        if input_ndim == 4:
            hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)

        if attn.residual_connection:
            hidden_states = hidden_states + residual

        hidden_states = hidden_states / attn.rescale_output_factor

        return hidden_states

def set_attention_processor(unet, id_length, is_ipadapter=False):
    global total_count
    total_count = 0
    attn_procs = {}
    for name in unet.attn_processors.keys():
        cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim
        if name.startswith("mid_block"):
            hidden_size = unet.config.block_out_channels[-1]
        elif name.startswith("up_blocks"):
            block_id = int(name[len("up_blocks.")])
            hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
        elif name.startswith("down_blocks"):
            block_id = int(name[len("down_blocks.")])
            hidden_size = unet.config.block_out_channels[block_id]
        if cross_attention_dim is None:
            if name.startswith("up_blocks"):
                attn_procs[name] = SpatialAttnProcessor2_0(id_length=id_length)
                total_count += 1
            else:
                attn_procs[name] = AttnProcessor()
        else:
            if is_ipadapter:
                attn_procs[name] = IPAttnProcessor2_0(
                    hidden_size=hidden_size,
                    cross_attention_dim=cross_attention_dim,
                    scale=1,
                    num_tokens=4,
                ).to(unet.device, dtype=torch.float16)
            else:
                attn_procs[name] = AttnProcessor()
    unet.set_attn_processor(copy.deepcopy(attn_procs))
    print("successfully loaded paired self-attention")
    print(f"number of the processor : {total_count}")
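# Usage sketch (illustrative; `pipe` refers to the StableDiffusionXLPipeline that the
# app constructs elsewhere in this file):
#     set_attention_processor(pipe.unet, id_length=4, is_ipadapter=False)
# Only the self-attention layers of the UNet's up-blocks are swapped for
# SpatialAttnProcessor2_0; every other layer keeps the default AttnProcessor
# (or IPAttnProcessor2_0 for cross-attention when is_ipadapter=True).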
" load_js = """ async () => { const url = "https://huggingface.co/datasets/radames/gradio-components/raw/main/sketch-canvas.js" fetch(url) .then(res => res.text()) .then(text => { const script = document.createElement('script'); script.type = "module" script.src = URL.createObjectURL(new Blob([text], { type: 'application/javascript' })); document.head.appendChild(script); }); } """ get_js_colors = """ async (canvasData) => { const canvasEl = document.getElementById("canvas-root"); return [canvasEl._data] } """ css = ''' #color-bg{display:flex;justify-content: center;align-items: center;} .color-bg-item{width: 100%; height: 32px} #main_button{width:100%}