#!/usr/bin/env python
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fine-tuning script for Stable Diffusion XL for text2image with support for LoRA."""

import argparse
import copy
import itertools
import logging
import math
import os
import random
import shutil
from pathlib import Path
from typing import Dict

import datasets
import diffusers
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import DistributedDataParallelKwargs, ProjectConfiguration, set_seed
from diffusers import (
    AutoencoderKL,
    DDPMScheduler,
    StableDiffusionXLPipeline,
    UNet2DConditionModel,
)
from diffusers.loaders import LoraLoaderMixin
from diffusers.models.lora import LoRALinearLayer
from diffusers.optimization import get_scheduler
from diffusers.training_utils import compute_snr
from diffusers.utils import check_min_version, is_wandb_available
from diffusers.utils.import_utils import is_xformers_available
from huggingface_hub import create_repo, upload_folder
from packaging import version
from torchvision import transforms
from torchvision.transforms.functional import crop
from tqdm.auto import tqdm
from transformers import PretrainedConfig

from dreamcreature.attn_processor import AttnProcessorCustom
from dreamcreature.dataset import DreamCreatureDataset
from dreamcreature.dino import DINO
from dreamcreature.kmeans_segmentation import KMeansSegmentation
from dreamcreature.loss import dreamcreature_loss
from dreamcreature.mapper import TokenMapper
from dreamcreature.pipeline_xl import DreamCreatureSDXLPipeline
from dreamcreature.text_encoder import CustomCLIPTextModel, CustomCLIPTextModelWithProjection
from dreamcreature.tokenizer import MultiTokenCLIPTokenizer
from utils import add_tokens, tokenize_prompt, get_attn_processors

IMAGENET_TEMPLATES = [
    "a photo of a {}",
    "a rendering of a {}",
    "a cropped photo of the {}",
    "the photo of a {}",
    "a photo of a clean {}",
    "a photo of a dirty {}",
    "a dark photo of the {}",
    "a photo of my {}",
    "a photo of the cool {}",
    "a close-up photo of a {}",
    "a bright photo of the {}",
    "a cropped photo of a {}",
    "a photo of the {}",
    "a good photo of the {}",
    "a photo of one {}",
    "a close-up photo of the {}",
    "a rendition of the {}",
    "a photo of the clean {}",
    "a rendition of a {}",
    "a photo of a nice {}",
    "a good photo of a {}",
    "a photo of the nice {}",
    "a photo of the small {}",
    "a photo of the weird {}",
    "a photo of the large {}",
    "a photo of a cool {}",
    "a photo of a small {}",
]
# Will error if the minimal version of diffusers is not installed. Remove at your own risk.
check_min_version("0.25.0.dev0")

logger = get_logger(__name__)


# TODO: This function should be removed once training scripts are rewritten in PEFT
def text_encoder_lora_state_dict(text_encoder):
    state_dict = {}

    def text_encoder_attn_modules(text_encoder):
        from transformers import CLIPTextModel, CLIPTextModelWithProjection

        attn_modules = []
        if isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)):
            for i, layer in enumerate(text_encoder.text_model.encoder.layers):
                name = f"text_model.encoder.layers.{i}.self_attn"
                mod = layer.self_attn
                attn_modules.append((name, mod))
        return attn_modules

    for name, module in text_encoder_attn_modules(text_encoder):
        for k, v in module.q_proj.lora_linear_layer.state_dict().items():
            state_dict[f"{name}.q_proj.lora_linear_layer.{k}"] = v
        for k, v in module.k_proj.lora_linear_layer.state_dict().items():
            state_dict[f"{name}.k_proj.lora_linear_layer.{k}"] = v
        for k, v in module.v_proj.lora_linear_layer.state_dict().items():
            state_dict[f"{name}.v_proj.lora_linear_layer.{k}"] = v
        for k, v in module.out_proj.lora_linear_layer.state_dict().items():
            state_dict[f"{name}.out_proj.lora_linear_layer.{k}"] = v
    return state_dict


def save_model_card(
    repo_id: str,
    images=None,
    base_model: str = "",
    dataset_name: str = "",
    train_text_encoder=False,
    repo_folder=None,
    vae_path=None,
):
    img_str = ""
    for i, image in enumerate(images):
        image.save(os.path.join(repo_folder, f"image_{i}.png"))
        img_str += f"![img_{i}](./image_{i}.png)\n"
    yaml = f"""
---
license: creativeml-openrail-m
base_model: {base_model}
dataset: {dataset_name}
tags:
- stable-diffusion-xl
- stable-diffusion-xl-diffusers
- text-to-image
- diffusers
- lora
inference: true
---
"""
    model_card = f"""
# LoRA text2image fine-tuning - {repo_id}
These are LoRA adaptation weights for {base_model}. The weights were fine-tuned on the {dataset_name} dataset. You can find some example images below. \n
{img_str}
LoRA for the text encoder was enabled: {train_text_encoder}.
Special VAE used for training: {vae_path}.
"""
    with open(os.path.join(repo_folder, "README.md"), "w") as f:
        f.write(yaml + model_card)


def import_model_class_from_model_name_or_path(
    pretrained_model_name_or_path: str, revision: str, subfolder: str = "text_encoder"
):
    text_encoder_config = PretrainedConfig.from_pretrained(
        pretrained_model_name_or_path, subfolder=subfolder, revision=revision
    )
    model_class = text_encoder_config.architectures[0]
    if model_class == "CLIPTextModel":
        from transformers import CLIPTextModel

        return CLIPTextModel
    elif model_class == "CLIPTextModelWithProjection":
        from transformers import CLIPTextModelWithProjection

        return CLIPTextModelWithProjection
    else:
        raise ValueError(f"{model_class} is not supported.")


def parse_args(input_args=None):
    parser = argparse.ArgumentParser(description="Simple example of a training script.")
    parser.add_argument(
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--pretrained_vae_model_name_or_path",
        type=str,
        default=None,
        help="Path to pretrained VAE model with better numerical stability. More details: https://github.com/huggingface/diffusers/pull/4038.",
    )
    parser.add_argument(
        "--revision",
        type=str,
        default=None,
        required=False,
        help="Revision of pretrained model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--variant",
        type=str,
        default=None,
        help="Variant of the model files of the pretrained model identifier from huggingface.co/models, e.g. fp16",
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default=None,
        help=(
            "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private,"
            " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem,"
            " or to a folder containing files that 🤗 Datasets can understand."
        ),
    )
    parser.add_argument(
        "--dataset_config_name",
        type=str,
        default=None,
        help="The config of the Dataset, leave as None if there's only one config.",
    )
    parser.add_argument(
        "--train_data_dir",
        type=str,
        default=None,
        help=(
            "A folder containing the training data. Folder contents must follow the structure described in"
            " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file"
            " must exist to provide the captions for the images. Ignored if `dataset_name` is specified."
        ),
    )
    parser.add_argument(
        "--image_column", type=str, default="image", help="The column of the dataset containing an image."
    )
    parser.add_argument(
        "--caption_column",
        type=str,
        default="text",
        help="The column of the dataset containing a caption or a list of captions.",
    )
    parser.add_argument(
        "--validation_prompt",
        type=str,
        default=None,
        help="A prompt that is used during validation to verify that the model is learning.",
    )
    parser.add_argument(
        "--num_validation_images",
        type=int,
        default=4,
        help="Number of images that should be generated during validation with `validation_prompt`.",
    )
    parser.add_argument(
        "--validation_epochs",
        type=int,
        default=1,
        help=(
            "Run fine-tuning validation every X epochs. The validation process consists of running the prompt"
            " `args.validation_prompt` multiple times: `args.num_validation_images`."
        ),
    )
    parser.add_argument(
        "--max_train_samples",
        type=int,
        default=None,
        help=(
            "For debugging purposes or quicker training, truncate the number of training examples to this "
            "value if set."
        ),
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default="sd-model-finetuned-lora",
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--cache_dir",
        type=str,
        default=None,
        help="The directory where the downloaded models and datasets will be stored.",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--resolution",
        type=int,
        default=1024,
        help=(
            "The resolution for input images, all the images in the train/validation dataset will be resized to this"
            " resolution"
        ),
    )
    parser.add_argument(
        "--center_crop",
        default=False,
        action="store_true",
        help=(
            "Whether to center crop the input images to the resolution. If not set, the images will be randomly"
            " cropped. The images will be resized to the resolution first before cropping."
        ),
    )
    parser.add_argument(
        "--random_flip",
        action="store_true",
        help="whether to randomly flip images horizontally",
    )
    parser.add_argument(
        "--train_text_encoder",
        action="store_true",
        help="Whether to train the text encoder. If set, the text encoder should be float32 precision.",
    )
    parser.add_argument(
        "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader."
    )
    parser.add_argument("--num_train_epochs", type=int, default=100)
    parser.add_argument(
        "--max_train_steps",
        type=int,
        default=None,
        help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
    )
    parser.add_argument(
        "--checkpointing_steps",
        type=int,
        default=500,
        help=(
            "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final"
            " checkpoints in case they are better than the last checkpoint, and are also suitable for resuming"
            " training using `--resume_from_checkpoint`."
        ),
    )
    parser.add_argument(
        "--checkpoints_total_limit",
        type=int,
        default=None,
        help=("Max number of checkpoints to store."),
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help=(
            "Whether training should be resumed from a previous checkpoint. Use a path saved by"
            ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
        ),
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of update steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument(
        "--gradient_checkpointing",
        action="store_true",
        help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="Initial learning rate (after the potential warmup period) to use.",
    )
    parser.add_argument(
        "--scale_lr",
        action="store_true",
        default=False,
        help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
    )
    parser.add_argument(
        "--lr_scheduler",
        type=str,
        default="constant",
        help=(
            'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
            ' "constant", "constant_with_warmup"]'
        ),
    )
    parser.add_argument(
        "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
    )
    parser.add_argument(
        "--snr_gamma",
        type=float,
        default=None,
        help="SNR weighting gamma to be used if rebalancing the loss. Recommended value is 5.0. "
        "More details here: https://arxiv.org/abs/2303.09556.",
    )
    parser.add_argument(
        "--allow_tf32",
        action="store_true",
        help=(
            "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
            " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
        ),
    )
    parser.add_argument(
        "--dataloader_num_workers",
        type=int,
        default=0,
        help=(
            "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
        ),
    )
    parser.add_argument(
        "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
    )
    parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
    parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
    parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
    parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
    parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
    parser.add_argument(
        "--prediction_type",
        type=str,
        default=None,
        help="The prediction_type that shall be used for training. Choose between 'epsilon' or 'v_prediction' or leave `None`. If left as `None`, the default prediction type of the scheduler (`noise_scheduler.config.prediction_type`) is used.",
    )
    parser.add_argument(
        "--hub_model_id",
        type=str,
        default=None,
        help="The name of the repository to keep in sync with the local `output_dir`.",
    )
    parser.add_argument(
        "--logging_dir",
        type=str,
        default="logs",
        help=(
            "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
            " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
        ),
    )
    parser.add_argument(
        "--report_to",
        type=str,
        default="tensorboard",
        help=(
            'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
            ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
        ),
    )
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16"],
        help=(
            "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
            " 1.10 and an Nvidia Ampere GPU. Defaults to the value of the accelerate config of the current system or the"
            " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
        ),
    )
    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
    parser.add_argument(
        "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
    )
    parser.add_argument("--noise_offset", type=float, default=0, help="The scale of noise offset.")
    parser.add_argument(
        "--rank",
        type=int,
        default=4,
        help=("The dimension of the LoRA update matrices."),
    )
    parser.add_argument('--filename', default='train.txt')
    parser.add_argument('--code_filename', default='train_caps_better_m8_k256.txt')
    parser.add_argument('--repeat', default=1, type=int)
    parser.add_argument('--scheduler_steps', default=1000, type=int, help='Number of scheduler training timesteps; set to 4 for SDXL-Turbo.')
    parser.add_argument('--num_parts', type=int, default=4, help="Number of parts")
    parser.add_argument('--num_k_per_part', type=int, default=256, help='Number of k')
    parser.add_argument('--mapper_lr_scale', default=1, type=float)
    parser.add_argument('--mapper_lr', default=0.0001, type=float)
    parser.add_argument('--attn_loss', default=0, type=float)
    parser.add_argument('--projection_nlayers', default=3, type=int)
    parser.add_argument('--masked_training', action='store_true')
    parser.add_argument('--drop_tokens', action='store_true')
    parser.add_argument('--drop_rate', type=float, default=0.5)
    parser.add_argument('--drop_counts', default='half')
    parser.add_argument('--class_name', default='')
    parser.add_argument('--no_pe', action='store_true')
    parser.add_argument('--vector_shuffle', action='store_true')
    parser.add_argument('--use_gt_label', action='store_true')
    parser.add_argument('--bg_code', default=7, type=int)  # for gt_label
    parser.add_argument('--fg_idx', default=0, type=int)
    parser.add_argument('--use_templates', action='store_true')
    parser.add_argument('--filter_class', default=None, type=int, help='debugging purpose')

    if input_args is not None:
        args = parser.parse_args(input_args)
    else:
        args = parser.parse_args()

    env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
    if env_local_rank != -1 and env_local_rank != args.local_rank:
        args.local_rank = env_local_rank

    # Sanity checks
    if args.dataset_name is None and args.train_data_dir is None:
        raise ValueError("Need either a dataset name or a training folder.")

    return args
def unet_attn_processors_state_dict(unet) -> Dict[str, torch.Tensor]:
    """
    Returns:
        a state dict containing just the attention processor parameters.
    """
    attn_processors = get_attn_processors(unet)
    attn_processors_state_dict = {}
    for attn_processor_key, attn_processor in attn_processors.items():
        for parameter_key, parameter in attn_processor.state_dict().items():
            attn_processors_state_dict[f"{attn_processor_key}.{parameter_key}"] = parameter
    return attn_processors_state_dict


def encode_prompt(text_encoders, text_input_ids_list, placeholder_token_ids, mapper_outputs):
    prompt_embeds_list = []
    for i, text_encoder in enumerate(text_encoders):
        text_input_ids = text_input_ids_list[i]
        modified_hs = text_encoder.text_model.forward_embeddings_with_mapper(
            text_input_ids, None, mapper_outputs[i], placeholder_token_ids
        )
        prompt_embeds = text_encoder(
            text_input_ids, hidden_states=modified_hs, output_hidden_states=True
        )
        # prompt_embeds = text_encoder(
        #     text_input_ids.to(text_encoder.device),
        #     output_hidden_states=True,
        # )
        # We are only ALWAYS interested in the pooled output of the final text encoder
        pooled_prompt_embeds = prompt_embeds[0]
        prompt_embeds = prompt_embeds.hidden_states[-2]
        bs_embed, seq_len, _ = prompt_embeds.shape
        prompt_embeds = prompt_embeds.view(bs_embed, seq_len, -1)
        prompt_embeds_list.append(prompt_embeds)
    prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
    pooled_prompt_embeds = pooled_prompt_embeds.view(bs_embed, -1)
    return prompt_embeds, pooled_prompt_embeds


def collate_fn(args, tokenizer_one, tokenizer_two, placeholder_token):
    # Preprocessing the datasets.
    train_resize = transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR)
    train_crop = transforms.CenterCrop(args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution)
    train_flip = transforms.RandomHorizontalFlip(p=1.0)
    train_transforms = transforms.Compose(
        [
            transforms.ToTensor(),
            transforms.Normalize([0.5], [0.5]),
        ]
    )

    def f(examples):
        # image aug
        original_sizes = []
        all_images = []
        crop_top_lefts = []
        captions = []
        raw_images = []
        appeared_tokens = []
        codes = []
        for i in range(len(examples)):
            ##### original sdxl process #####
            image = examples[i]['pixel_values'].convert('RGB')
            original_sizes.append((image.height, image.width))
            image = train_resize(image)
            if args.center_crop:
                y1 = max(0, int(round((image.height - args.resolution) / 2.0)))
                x1 = max(0, int(round((image.width - args.resolution) / 2.0)))
                image = train_crop(image)
            else:
                y1, x1, h, w = train_crop.get_params(image, (args.resolution, args.resolution))
                image = crop(image, y1, x1, h, w)
            if args.random_flip and random.random() < 0.5:
                # flip: mirror the crop's left coordinate so crop_top_left matches the flipped image
                x1 = image.width - x1
                image = train_flip(image)
            crop_top_left = (y1, x1)
            crop_top_lefts.append(crop_top_left)
            raw_images.append(image)
            image = train_transforms(image)
            all_images.append(image)
            ##### dreamcreature caption #####
            if args.use_templates and random.random() <= 0.5:  # 50% using templates
                if args.class_name != '':
                    caption = random.choice(IMAGENET_TEMPLATES).format(f'{placeholder_token} {args.class_name}')
                else:
                    caption = random.choice(IMAGENET_TEMPLATES).format(placeholder_token)
            else:
                if args.class_name != '':
                    caption = f'{placeholder_token} {args.class_name}'
                else:
                    caption = placeholder_token
            tokens = tokenizer_one.token_map[placeholder_token][:args.num_parts]
            tokens = [tokens[a] for a in examples[i]['appeared']]
            if args.vector_shuffle or args.drop_tokens:
                tokens = copy.copy(tokens)
                random.shuffle(tokens)
            if args.drop_tokens and random.random() < args.drop_rate and len(tokens) >= 2:
                # randomly drop half of the tokens
                if args.drop_counts == 'half':
                    tokens = tokens[:len(tokens) // 2]
                else:
                    tokens = tokens[:int(args.drop_counts)]
            caption = caption.replace(placeholder_token, ' '.join(tokens))
            captions.append(caption)
            appeared = [int(t.split('_')[1]) for t in tokens]  # <part>_i
            # examples[i]['appeared'] = appeared
            appeared_tokens.append(appeared)
            code = examples[i]['codes']
            codes.append(code)
        tokens_one = tokenize_prompt(tokenizer_one, captions)
        tokens_two = tokenize_prompt(tokenizer_two, captions)
        ##### start stacking #####
        pixel_values = torch.stack([image for image in all_images])
        pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
        original_sizes = [s for s in original_sizes]
        crop_top_lefts = [c for c in crop_top_lefts]
        input_ids_one = torch.stack([t for t in tokens_one])
        input_ids_two = torch.stack([t for t in tokens_two])
        codes = torch.stack(codes, dim=0)
        collate_output = {
            "original_sizes": original_sizes,
            "crop_top_lefts": crop_top_lefts,
            "pixel_values": pixel_values,
            "input_ids_one": input_ids_one,
            "input_ids_two": input_ids_two,
            "raw_images": raw_images,
            "appeared_tokens": appeared_tokens,
            "codes": codes,
        }
        return collate_output

    return f


def setup_attn_processors(unet, args):
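    # Assumption: AttnProcessorCustom keeps cross-attention maps at 1/32 of the input
    # resolution (e.g. 32x32 for 1024px inputs) so they can be consumed by the part-attention loss.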
    attn_size = args.resolution // 32
    attn_procs = {}
    for name in unet.attn_processors.keys():
        attn_procs[name] = AttnProcessorCustom(attn_size)
    unet.set_attn_processor(attn_procs)


def init_for_pipeline(args):
    tokenizer_one = MultiTokenCLIPTokenizer.from_pretrained(
        args.pretrained_model_name_or_path,
        subfolder="tokenizer",
        revision=args.revision,
        use_fast=False,
    )
    tokenizer_two = MultiTokenCLIPTokenizer.from_pretrained(
        args.pretrained_model_name_or_path,
        subfolder="tokenizer_2",
        revision=args.revision,
        use_fast=False,
    )
    text_encoder_cls_one = CustomCLIPTextModel
    text_encoder_cls_two = CustomCLIPTextModelWithProjection
    text_encoder_one = text_encoder_cls_one.from_pretrained(
        args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, variant=args.variant
    )
    text_encoder_two = text_encoder_cls_two.from_pretrained(
        args.pretrained_model_name_or_path, subfolder="text_encoder_2", revision=args.revision, variant=args.variant
    )
    OUT_DIMS = 768 + 1280  # 2048
    simple_mapper = TokenMapper(
        args.num_parts, args.num_k_per_part, OUT_DIMS, args.projection_nlayers
    )
    return text_encoder_one, text_encoder_two, tokenizer_one, tokenizer_two, simple_mapper


def main(args):
    logging_dir = Path(args.output_dir, args.logging_dir)
    accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
    kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
    accelerator = Accelerator(
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        mixed_precision=args.mixed_precision,
        log_with=args.report_to,
        project_config=accelerator_project_config,
        kwargs_handlers=[kwargs],
    )
    if args.report_to == "wandb":
        if not is_wandb_available():
            raise ImportError("Make sure to install wandb if you want to use it for logging during training.")
        import wandb

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state, main_process_only=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
        diffusers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
        diffusers.utils.logging.set_verbosity_error()

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    # Handle the repository creation
    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
        if args.push_to_hub:
            repo_id = create_repo(
                repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
            ).repo_id

    # Load the tokenizers (replace AutoTokenizer with the custom MultiTokenCLIPTokenizer)
    tokenizer_one = MultiTokenCLIPTokenizer.from_pretrained(
        args.pretrained_model_name_or_path,
        subfolder="tokenizer",
        revision=args.revision,
        use_fast=False,
    )
    tokenizer_two = MultiTokenCLIPTokenizer.from_pretrained(
        args.pretrained_model_name_or_path,
        subfolder="tokenizer_2",
        revision=args.revision,
        use_fast=False,
    )

    # import correct text encoder classes
    # text_encoder_cls_one = import_model_class_from_model_name_or_path(
    #     args.pretrained_model_name_or_path, args.revision
    # )
    # text_encoder_cls_two = import_model_class_from_model_name_or_path(
    #     args.pretrained_model_name_or_path, args.revision, subfolder="text_encoder_2"
    # )
    text_encoder_cls_one = CustomCLIPTextModel
    text_encoder_cls_two = CustomCLIPTextModelWithProjection

    # Load scheduler and models
    noise_scheduler = DDPMScheduler.from_pretrained(
        args.pretrained_model_name_or_path, subfolder="scheduler", num_train_timesteps=args.scheduler_steps
    )
    text_encoder_one = text_encoder_cls_one.from_pretrained(
        args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, variant=args.variant
    )
    text_encoder_two = text_encoder_cls_two.from_pretrained(
        args.pretrained_model_name_or_path, subfolder="text_encoder_2", revision=args.revision, variant=args.variant
    )
    vae_path = (
        args.pretrained_model_name_or_path
        if args.pretrained_vae_model_name_or_path is None
        else args.pretrained_vae_model_name_or_path
    )
    vae = AutoencoderKL.from_pretrained(
        vae_path,
        subfolder="vae" if args.pretrained_vae_model_name_or_path is None else None,
        revision=args.revision,
        variant=args.variant,
    )
    unet = UNet2DConditionModel.from_pretrained(
        args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, variant=args.variant
    )

    ##### dreamcreature init #####
    OUT_DIMS = 768 + 1280  # 2048
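    # DINO features and the pretrained k-means segmenter (loaded from
    # `<train_data_dir>/pretrained_kmeans.pth`) presumably supply the per-part
    # assignments consumed by `dreamcreature_loss` for the attention loss.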
    dino = DINO()
    seg = KMeansSegmentation(
        args.train_data_dir + '/pretrained_kmeans.pth',
        args.fg_idx,
        args.bg_code,
        args.num_parts,
        args.num_k_per_part,
    )
    simple_mapper = TokenMapper(
        args.num_parts, args.num_k_per_part, OUT_DIMS, args.projection_nlayers
    )

    # We only train the additional adapter LoRA layers
    vae.requires_grad_(False)
    text_encoder_one.requires_grad_(False)
    text_encoder_two.requires_grad_(False)
    unet.requires_grad_(False)
    dino.requires_grad_(False)

    ##### dreamcreature, add sub-concepts token ids ####
    placeholder_token = "<part>"
    initializer_token = None
    placeholder_token_ids_one = add_tokens(
        tokenizer_one, text_encoder_one, placeholder_token, args.num_parts, initializer_token
    )
    placeholder_token_ids_two = add_tokens(
        tokenizer_two, text_encoder_two, placeholder_token, args.num_parts, initializer_token
    )
    # For mixed precision training we cast all non-trainable weights (vae, non-lora text_encoder and non-lora unet) to half-precision
    # as these weights are only used for inference, keeping weights in full precision is not required.
    weight_dtype = torch.float32
    if accelerator.mixed_precision == "fp16":
        weight_dtype = torch.float16
    elif accelerator.mixed_precision == "bf16":
        weight_dtype = torch.bfloat16

    # Move unet, vae and text_encoder to device and cast to weight_dtype
    # The VAE is in float32 to avoid NaN losses.
    unet.to(accelerator.device, dtype=weight_dtype)
    if args.pretrained_vae_model_name_or_path is None:
        vae.to(accelerator.device, dtype=torch.float32)
    else:
        vae.to(accelerator.device, dtype=weight_dtype)
    text_encoder_one.to(accelerator.device, dtype=weight_dtype)
    text_encoder_two.to(accelerator.device, dtype=weight_dtype)
    simple_mapper.to(accelerator.device)

    if args.enable_xformers_memory_efficient_attention:
        if is_xformers_available():
            import xformers

            xformers_version = version.parse(xformers.__version__)
            if xformers_version == version.parse("0.0.16"):
                logger.warn(
                    "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
                )
            unet.enable_xformers_memory_efficient_attention()
        else:
            raise ValueError("xformers is not available. Make sure it is installed correctly")

    # now we will add new LoRA weights to the attention layers
    # Set correct lora layers
    unet_lora_parameters = []
    for attn_processor_name, attn_processor in unet.attn_processors.items():
        # Parse the attention module.
        attn_module = unet
        for n in attn_processor_name.split(".")[:-1]:
            attn_module = getattr(attn_module, n)
        # Set the `lora_layer` attribute of the attention-related matrices.
        attn_module.to_q.set_lora_layer(
            LoRALinearLayer(
                in_features=attn_module.to_q.in_features, out_features=attn_module.to_q.out_features, rank=args.rank
            )
        )
        attn_module.to_k.set_lora_layer(
            LoRALinearLayer(
                in_features=attn_module.to_k.in_features, out_features=attn_module.to_k.out_features, rank=args.rank
            )
        )
        attn_module.to_v.set_lora_layer(
            LoRALinearLayer(
                in_features=attn_module.to_v.in_features, out_features=attn_module.to_v.out_features, rank=args.rank
            )
        )
        attn_module.to_out[0].set_lora_layer(
            LoRALinearLayer(
                in_features=attn_module.to_out[0].in_features,
                out_features=attn_module.to_out[0].out_features,
                rank=args.rank,
            )
        )
        # Accumulate the LoRA params to optimize.
        unet_lora_parameters.extend(attn_module.to_q.lora_layer.parameters())
        unet_lora_parameters.extend(attn_module.to_k.lora_layer.parameters())
        unet_lora_parameters.extend(attn_module.to_v.lora_layer.parameters())
        unet_lora_parameters.extend(attn_module.to_out[0].lora_layer.parameters())
    setup_attn_processors(unet, args)

    # The text encoder comes from 🤗 transformers, so we cannot directly modify it.
    # So, instead, we monkey-patch the forward calls of its attention-blocks.
    if args.train_text_encoder:
        # ensure that dtype is float32, even if rest of the model that isn't trained is loaded in fp16
        text_lora_parameters_one = LoraLoaderMixin._modify_text_encoder(
            text_encoder_one, dtype=torch.float32, rank=args.rank
        )
        text_lora_parameters_two = LoraLoaderMixin._modify_text_encoder(
            text_encoder_two, dtype=torch.float32, rank=args.rank
        )

    # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format
    def save_model_hook(models, weights, output_dir):
        if accelerator.is_main_process:
            # save the unet attn processor layers, the text encoder attn layers (if trained),
            # and the token mapper
            unet_lora_layers_to_save = None
            text_encoder_one_lora_layers_to_save = None
            text_encoder_two_lora_layers_to_save = None
            mapper_to_save = None
            for model in models:
                if isinstance(model, type(accelerator.unwrap_model(unet))):
                    unet_lora_layers_to_save = unet_attn_processors_state_dict(model)
                elif isinstance(model, type(accelerator.unwrap_model(text_encoder_one))):
                    text_encoder_one_lora_layers_to_save = text_encoder_lora_state_dict(model)
                elif isinstance(model, type(accelerator.unwrap_model(text_encoder_two))):
                    text_encoder_two_lora_layers_to_save = text_encoder_lora_state_dict(model)
                elif isinstance(model, TokenMapper):
                    mapper_to_save = model.state_dict()
                else:
                    raise ValueError(f"unexpected save model: {model.__class__}")

                # make sure to pop weight so that corresponding model is not saved again
                weights.pop()

            StableDiffusionXLPipeline.save_lora_weights(
                output_dir,
                unet_lora_layers=unet_lora_layers_to_save,
                text_encoder_lora_layers=text_encoder_one_lora_layers_to_save,
                text_encoder_2_lora_layers=text_encoder_two_lora_layers_to_save,
            )
            torch.save(mapper_to_save, output_dir + '/hash_mapper.pth')

    def load_model_hook(models, input_dir):
        unet_ = None
        text_encoder_one_ = None
        text_encoder_two_ = None
        mapper_ = None
        while len(models) > 0:
            model = models.pop()
            if isinstance(model, type(accelerator.unwrap_model(unet))):
                unet_ = model
            elif isinstance(model, type(accelerator.unwrap_model(text_encoder_one))):
                text_encoder_one_ = model
            elif isinstance(model, type(accelerator.unwrap_model(text_encoder_two))):
                text_encoder_two_ = model
            elif isinstance(model, TokenMapper):
                mapper_ = model
            else:
                raise ValueError(f"unexpected save model: {model.__class__}")

        lora_state_dict, network_alphas = LoraLoaderMixin.lora_state_dict(input_dir)
        LoraLoaderMixin.load_lora_into_unet(lora_state_dict, network_alphas=network_alphas, unet=unet_)

        text_encoder_state_dict = {k: v for k, v in lora_state_dict.items() if "text_encoder." in k}
        LoraLoaderMixin.load_lora_into_text_encoder(
            text_encoder_state_dict, network_alphas=network_alphas, text_encoder=text_encoder_one_
        )
        text_encoder_2_state_dict = {k: v for k, v in lora_state_dict.items() if "text_encoder_2." in k}
        LoraLoaderMixin.load_lora_into_text_encoder(
            text_encoder_2_state_dict, network_alphas=network_alphas, text_encoder=text_encoder_two_
        )
        mapper_.load_state_dict(torch.load(input_dir + '/hash_mapper.pth'))

    accelerator.register_save_state_pre_hook(save_model_hook)
    accelerator.register_load_state_pre_hook(load_model_hook)

    # Enable TF32 for faster training on Ampere GPUs,
    # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
    if args.allow_tf32:
        torch.backends.cuda.matmul.allow_tf32 = True

    if args.scale_lr:
        args.learning_rate = (
            args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
        )

    # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs
    if args.use_8bit_adam:
        try:
            import bitsandbytes as bnb
        except ImportError:
            raise ImportError(
                "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
            )

        optimizer_class = bnb.optim.AdamW8bit
    else:
        optimizer_class = torch.optim.AdamW

    extra_params = list(simple_mapper.parameters())
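    # The token mapper gets its own param group: its LR is the base LR scaled by
    # --mapper_lr_scale, falling back to --mapper_lr when the base learning rate is 0.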
    mapper_lr = args.learning_rate * args.mapper_lr_scale if args.learning_rate != 0 else args.mapper_lr

    # Optimizer creation
    params_to_optimize = (
        itertools.chain(unet_lora_parameters, text_lora_parameters_one, text_lora_parameters_two)
        if args.train_text_encoder
        else unet_lora_parameters
    )
    optimizer = optimizer_class(
        [{'params': params_to_optimize}, {'params': extra_params, 'lr': mapper_lr}],
        lr=args.learning_rate,
        betas=(args.adam_beta1, args.adam_beta2),
        weight_decay=args.adam_weight_decay,
        eps=args.adam_epsilon,
    )
    # Create the training dataset
    train_dataset = DreamCreatureDataset(
        args.train_data_dir,
        args.filename,
        code_filename=args.code_filename,
        num_parts=args.num_parts,
        num_k_per_part=args.num_k_per_part,
        repeat=args.repeat,
        use_gt_label=args.use_gt_label,
        bg_code=args.bg_code,
    )

    with accelerator.main_process_first():
        if args.filter_class is not None:
            train_dataset.filter_by_class(args.filter_class)
            print('selected', len(train_dataset))
        if args.max_train_samples is not None:
            train_dataset.set_max_samples(args.max_train_samples, args.seed)

    # DataLoaders creation:
    train_dataloader = torch.utils.data.DataLoader(
        train_dataset,
        shuffle=True,
        collate_fn=collate_fn(args, tokenizer_one, tokenizer_two, placeholder_token),
        batch_size=args.train_batch_size,
        num_workers=args.dataloader_num_workers,
    )

    # Scheduler and math around the number of training steps.
    overrode_max_train_steps = False
    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
    if args.max_train_steps is None:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
        overrode_max_train_steps = True

    lr_scheduler = get_scheduler(
        args.lr_scheduler,
        optimizer=optimizer,
        num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps,
        num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
    )

    # Prepare everything with our `accelerator`.
    if args.train_text_encoder:
        unet, text_encoder_one, text_encoder_two, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
            unet, text_encoder_one, text_encoder_two, optimizer, train_dataloader, lr_scheduler
        )
    else:
        unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
            unet, optimizer, train_dataloader, lr_scheduler
        )
    simple_mapper = accelerator.prepare(simple_mapper)

    # We need to recalculate our total training steps as the size of the training dataloader may have changed.
    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
    if overrode_max_train_steps:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
    # Afterwards we recalculate our number of training epochs
    args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)

    # We need to initialize the trackers we use, and also store our configuration.
    # The trackers initialize automatically on the main process.
    if accelerator.is_main_process:
        accelerator.init_trackers("text2image-fine-tune", config=vars(args))

    # Train!
    total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps

    logger.info("***** Running training *****")
    logger.info(f" Num examples = {len(train_dataset)}")
    logger.info(f" Num Epochs = {args.num_train_epochs}")
    logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
    logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
    logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
    logger.info(f" Total optimization steps = {args.max_train_steps}")
    global_step = 0
    first_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint != "latest":
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = os.listdir(args.output_dir)
            dirs = [d for d in dirs if d.startswith("checkpoint")]
            dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
            path = dirs[-1] if len(dirs) > 0 else None
        if path is None:
            accelerator.print(
                f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
            )
            args.resume_from_checkpoint = None
            initial_global_step = 0
        else:
            accelerator.print(f"Resuming from checkpoint {path}")
            accelerator.load_state(os.path.join(args.output_dir, path))
            global_step = int(path.split("-")[1])
            initial_global_step = global_step
            first_epoch = global_step // num_update_steps_per_epoch
    else:
        initial_global_step = 0

    progress_bar = tqdm(
        range(0, args.max_train_steps),
        initial=initial_global_step,
        desc="Steps",
        # Only show the progress bar once on each machine.
        disable=not accelerator.is_local_main_process,
    )

    for epoch in range(first_epoch, args.num_train_epochs):
        unet.train()
        if args.train_text_encoder:
            text_encoder_one.train()
            text_encoder_two.train()
        train_loss = 0.0
        train_diff_loss = 0.0
        train_attn_loss = 0.0
        for step, batch in enumerate(train_dataloader):
            with accelerator.accumulate(unet, simple_mapper):
                # Convert images to latent space
                if args.pretrained_vae_model_name_or_path is not None:
                    pixel_values = batch["pixel_values"].to(dtype=weight_dtype)
                else:
                    pixel_values = batch["pixel_values"]
                model_input = vae.encode(pixel_values).latent_dist.sample()
                model_input = model_input * vae.config.scaling_factor
                if args.pretrained_vae_model_name_or_path is None:
                    model_input = model_input.to(weight_dtype)

                # Sample noise that we'll add to the latents
                noise = torch.randn_like(model_input)
                if args.noise_offset:
                    # https://www.crosslabs.org//blog/diffusion-with-offset-noise
                    noise += args.noise_offset * torch.randn(
                        (model_input.shape[0], model_input.shape[1], 1, 1), device=model_input.device
                    )
                bsz = model_input.shape[0]

                # Sample a random timestep for each image
                timesteps = torch.randint(
                    0, noise_scheduler.config.num_train_timesteps, (bsz,), device=model_input.device
                )
                timesteps = timesteps.long()

                # Add noise to the model input according to the noise magnitude at each timestep
                # (this is the forward diffusion process)
                noisy_model_input = noise_scheduler.add_noise(model_input, noise, timesteps)

                # time ids
                def compute_time_ids(original_size, crops_coords_top_left):
                    # Adapted from pipeline.StableDiffusionXLPipeline._get_add_time_ids
                    target_size = (args.resolution, args.resolution)
                    add_time_ids = list(original_size + crops_coords_top_left + target_size)
                    add_time_ids = torch.tensor([add_time_ids])
                    add_time_ids = add_time_ids.to(accelerator.device, dtype=weight_dtype)
                    return add_time_ids

                add_time_ids = torch.cat(
                    [compute_time_ids(s, c) for s, c in zip(batch["original_sizes"], batch["crop_top_lefts"])]
                )

                # Predict the noise residual
                unet_added_conditions = {"time_ids": add_time_ids}
                # prompt_embeds, pooled_prompt_embeds = encode_prompt(
                #     text_encoders=[text_encoder_one, text_encoder_two],
                #     tokenizers=None,
                #     prompt=None,
                #     text_input_ids_list=[batch["input_ids_one"], batch["input_ids_two"]],
                # )
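                # The mapper turns the per-part codes into token embeddings; the first 768
                # dims are routed to text_encoder_one and the remaining 1280 to
                # text_encoder_two, matching OUT_DIMS = 768 + 1280.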
                mapper_outputs = simple_mapper(batch['codes'])
                prompt_embeds, pooled_prompt_embeds = encode_prompt(
                    text_encoders=[text_encoder_one, text_encoder_two],
                    text_input_ids_list=[batch["input_ids_one"], batch["input_ids_two"]],
                    placeholder_token_ids=placeholder_token_ids_one,
                    mapper_outputs=[mapper_outputs[..., :768], mapper_outputs[..., 768:]],
                )
                unet_added_conditions.update({"text_embeds": pooled_prompt_embeds})
                model_pred = unet(
                    noisy_model_input, timesteps, prompt_embeds, added_cond_kwargs=unet_added_conditions
                ).sample

                # Get the target for loss depending on the prediction type
                if args.prediction_type is not None:
                    # set prediction_type of scheduler if defined
                    noise_scheduler.register_to_config(prediction_type=args.prediction_type)
                if noise_scheduler.config.prediction_type == "epsilon":
                    target = noise
                elif noise_scheduler.config.prediction_type == "v_prediction":
                    target = noise_scheduler.get_velocity(model_input, noise, timesteps)
                else:
                    raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")

                if args.snr_gamma is None:
                    loss = F.mse_loss(model_pred.float(), target.float(), reduction="none")
                    attn_loss, max_attn = dreamcreature_loss(
                        batch, unet, dino, seg, placeholder_token_ids_one, accelerator
                    )
                    if args.masked_training:
                        masks = batch['masks'].unsqueeze(1).to(accelerator.device)
                        loss_image_mask = F.interpolate(
                            masks.float(), size=target.shape[-2:], mode='bilinear'
                        ) * torch.ones_like(target)
                        loss = loss * loss_image_mask
                        loss = loss.sum() / loss_image_mask.sum()
                    else:
                        loss = loss.mean()
                else:
                    # Compute loss-weights as per Section 3.4 of https://arxiv.org/abs/2303.09556.
                    # Since we predict the noise instead of x_0, the original formulation is slightly changed.
                    # This is discussed in Section 4.2 of the same paper.
                    snr = compute_snr(noise_scheduler, timesteps)
                    if noise_scheduler.config.prediction_type == "v_prediction":
                        # Velocity objective requires that we add one to SNR values before we divide by them.
                        snr = snr + 1
                    mse_loss_weights = (
                        torch.stack([snr, args.snr_gamma * torch.ones_like(timesteps)], dim=1).min(dim=1)[0] / snr
                    )
                    loss = F.mse_loss(model_pred.float(), target.float(), reduction="none")
                    attn_loss, max_attn = dreamcreature_loss(
                        batch, unet, dino, seg, placeholder_token_ids_one, accelerator
                    )
                    if args.masked_training:
                        masks = batch['masks'].unsqueeze(1).to(accelerator.device)
                        loss_image_mask = F.interpolate(
                            masks.float(), size=target.shape[-2:], mode='bilinear'
                        ) * torch.ones_like(target)
                        loss = loss * loss_image_mask
                        loss = loss.sum(dim=list(range(1, len(loss.shape)))) * mse_loss_weights
                        loss = loss.sum() / loss_image_mask.sum()
                    else:
                        loss = loss.mean(dim=list(range(1, len(loss.shape)))) * mse_loss_weights
                        loss = loss.mean()

                diff_loss = loss.clone().detach()
                avg_diff_loss = accelerator.gather(diff_loss.repeat(args.train_batch_size)).mean()
                train_diff_loss += avg_diff_loss.item() / args.gradient_accumulation_steps

                avg_attn_loss = accelerator.gather(attn_loss.repeat(args.train_batch_size)).mean()
                train_attn_loss += avg_attn_loss.item() / args.gradient_accumulation_steps

                loss += args.attn_loss * attn_loss

                # Gather the losses across all processes for logging (if we use distributed training).
                avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean()
                train_loss += avg_loss.item() / args.gradient_accumulation_steps

                # Backpropagate
                accelerator.backward(loss)
                if accelerator.sync_gradients:
                    params_to_clip = (
                        itertools.chain(unet_lora_parameters, text_lora_parameters_one, text_lora_parameters_two)
                        if args.train_text_encoder
                        else unet_lora_parameters
                    )
                    params_to_clip = list(params_to_clip) + extra_params
                    accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            # Checks if the accelerator has performed an optimization step behind the scenes
            if accelerator.sync_gradients:
                progress_bar.update(1)
                global_step += 1
                accelerator.log(
                    {
                        "train_loss": train_loss,
                        "diff_loss": train_diff_loss,
                        "attn_loss": train_attn_loss,
                        "max_attn": max_attn.item(),
                    },
                    step=global_step,
                )
                train_loss = 0.0
                train_attn_loss = 0.0
                train_diff_loss = 0.0

                if accelerator.is_main_process:
                    if global_step % args.checkpointing_steps == 0:
                        # _before_ saving state, check if this save would set us over the `checkpoints_total_limit`
                        if args.checkpoints_total_limit is not None:
                            checkpoints = os.listdir(args.output_dir)
                            checkpoints = [d for d in checkpoints if d.startswith("checkpoint")]
                            checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))

                            # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints
                            if len(checkpoints) >= args.checkpoints_total_limit:
                                num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
                                removing_checkpoints = checkpoints[0:num_to_remove]

                                logger.info(
                                    f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints"
                                )
                                logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")

                                for removing_checkpoint in removing_checkpoints:
                                    removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint)
                                    shutil.rmtree(removing_checkpoint)

                        save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
                        accelerator.save_state(save_path)
                        logger.info(f"Saved state to {save_path}")

            logs = {
                "step_loss": diff_loss.detach().item(),
                "attn_loss": attn_loss.detach().item(),
                "lr": lr_scheduler.get_last_lr()[0],
            }
            progress_bar.set_postfix(**logs)

            if global_step >= args.max_train_steps:
                break

        if accelerator.is_main_process:
            # todo: change pipeline
            if args.validation_prompt is not None and epoch % args.validation_epochs == 0:
                logger.info(
                    f"Running validation... \n Generating {args.num_validation_images} images with prompt:"
                    f" {args.validation_prompt}."
                )
                # create pipeline
                pipeline = DreamCreatureSDXLPipeline.from_pretrained(
                    args.pretrained_model_name_or_path,
                    vae=vae,
                    tokenizer=tokenizer_one,
                    tokenizer_2=tokenizer_two,
                    text_encoder=accelerator.unwrap_model(text_encoder_one),
                    text_encoder_2=accelerator.unwrap_model(text_encoder_two),
                    unet=accelerator.unwrap_model(unet),
                    revision=args.revision,
                    variant=args.variant,
                    torch_dtype=weight_dtype,
                )
                pipeline.placeholder_token_ids = placeholder_token_ids_one
                pipeline.simple_mapper = accelerator.unwrap_model(simple_mapper)
                pipeline.replace_token = False
                pipeline = pipeline.to(accelerator.device)
                pipeline.set_progress_bar_config(disable=True)

                # run inference
                generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed else None
                pipeline_args = {"prompt": args.validation_prompt}
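                # Turbo-style checkpoints are sampled with very few steps and without
                # classifier-free guidance (guidance_scale=0).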
                num_steps = 4 if 'turbo' in args.pretrained_model_name_or_path else 25
                gs = 0 if 'turbo' in args.pretrained_model_name_or_path else 5.0
                images = [
                    pipeline(
                        **pipeline_args, num_inference_steps=num_steps, guidance_scale=gs,
                        generator=generator, height=args.resolution, width=args.resolution,
                    ).images[0]
                    for _ in range(args.num_validation_images)
                ]

                for tracker in accelerator.trackers:
                    if tracker.name == "tensorboard":
                        np_images = np.stack([np.asarray(img) for img in images])
                        tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC")
                    if tracker.name == "wandb":
                        tracker.log(
                            {
                                "validation": [
                                    wandb.Image(image, caption=f"{i}: {args.validation_prompt}")
                                    for i, image in enumerate(images)
                                ]
                            }
                        )

                del pipeline
                torch.cuda.empty_cache()

    # Save the lora layers
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        unet = accelerator.unwrap_model(unet)
        unet_lora_layers = unet_attn_processors_state_dict(unet)

        if args.train_text_encoder:
            text_encoder_one = accelerator.unwrap_model(text_encoder_one)
            text_encoder_lora_layers = text_encoder_lora_state_dict(text_encoder_one)
            text_encoder_two = accelerator.unwrap_model(text_encoder_two)
            text_encoder_2_lora_layers = text_encoder_lora_state_dict(text_encoder_two)
        else:
            text_encoder_lora_layers = None
            text_encoder_2_lora_layers = None

        StableDiffusionXLPipeline.save_lora_weights(
            save_directory=args.output_dir,
            unet_lora_layers=unet_lora_layers,
            text_encoder_lora_layers=text_encoder_lora_layers,
            text_encoder_2_lora_layers=text_encoder_2_lora_layers,
        )
        torch.save(simple_mapper.to(torch.float32).state_dict(), args.output_dir + '/hash_mapper.pth')

        del unet
        del text_encoder_one
        del text_encoder_two
        del text_encoder_lora_layers
        del text_encoder_2_lora_layers
        del simple_mapper
        torch.cuda.empty_cache()

        # Final inference
        # Load previous pipeline
        text_encoder_one, text_encoder_two, tokenizer_one, tokenizer_two, simple_mapper = init_for_pipeline(args)
        pipeline = DreamCreatureSDXLPipeline.from_pretrained(
            args.pretrained_model_name_or_path,
            vae=vae,
            tokenizer=tokenizer_one,
            tokenizer_2=tokenizer_two,
            text_encoder=text_encoder_one,
            text_encoder_2=text_encoder_two,
            revision=args.revision,
            variant=args.variant,
            torch_dtype=weight_dtype,
        )
        pipeline.placeholder_token_ids = placeholder_token_ids_one
        pipeline.replace_token = False
        pipeline.simple_mapper = simple_mapper
        pipeline.simple_mapper.load_state_dict(torch.load(args.output_dir + '/hash_mapper.pth', map_location='cpu'))
        pipeline.simple_mapper.to(accelerator.device)
        setup_attn_processors(pipeline.unet, args)
        pipeline = pipeline.to(accelerator.device)

        # load attention processors
        pipeline.load_lora_weights(args.output_dir)

        # run inference
        images = []
        if args.validation_prompt and args.num_validation_images > 0:
            num_steps = 4 if 'turbo' in args.pretrained_model_name_or_path else 25
            gs = 0 if 'turbo' in args.pretrained_model_name_or_path else 5.0
            generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed else None
            images = [
                pipeline(
                    args.validation_prompt, num_inference_steps=num_steps, guidance_scale=gs,
                    generator=generator, height=args.resolution, width=args.resolution,
                ).images[0]
                for _ in range(args.num_validation_images)
            ]

            for tracker in accelerator.trackers:
                if tracker.name == "tensorboard":
                    np_images = np.stack([np.asarray(img) for img in images])
                    tracker.writer.add_images("test", np_images, epoch, dataformats="NHWC")
                if tracker.name == "wandb":
                    tracker.log(
                        {
                            "test": [
                                wandb.Image(image, caption=f"{i}: {args.validation_prompt}")
                                for i, image in enumerate(images)
                            ]
                        }
                    )

        if args.push_to_hub:
            save_model_card(
                repo_id,
                images=images,
                base_model=args.pretrained_model_name_or_path,
                dataset_name=args.dataset_name,
                train_text_encoder=args.train_text_encoder,
                repo_folder=args.output_dir,
                vae_path=args.pretrained_vae_model_name_or_path,
            )
            upload_folder(
                repo_id=repo_id,
                folder_path=args.output_dir,
                commit_message="End of training",
                ignore_patterns=["step_*", "epoch_*"],
            )

    accelerator.end_training()


if __name__ == "__main__":
    args = parse_args()
    main(args)
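# A minimal launch sketch (not the authors' official command): the script filename and
# dataset path below are placeholders and the flag values are illustrative only; every
# flag used here is defined in parse_args() above. Note that --train_data_dir is
# expected to contain `pretrained_kmeans.pth` (see KMeansSegmentation in main()).
#
#   accelerate launch train_dreamcreature_sdxl.py \
#       --pretrained_model_name_or_path stabilityai/stable-diffusion-xl-base-1.0 \
#       --train_data_dir /path/to/dataset \
#       --filename train.txt --code_filename train_caps_better_m8_k256.txt \
#       --num_parts 8 --num_k_per_part 256 --resolution 1024 \
#       --train_batch_size 2 --gradient_accumulation_steps 4 \
#       --learning_rate 1e-4 --attn_loss 0.01 --max_train_steps 10000 \
#       --mixed_precision fp16 --output_dir sdxl-dreamcreature-lora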