|
import os |
|
import random |
|
from collections import OrderedDict |
|
from typing import Union, Literal, List, Optional |
|
|
|
import numpy as np |
|
from diffusers import T2IAdapter, AutoencoderTiny, ControlNetModel |
|
|
|
import torch.nn.functional as F
|
from safetensors.torch import load_file |
|
from torch.utils.data import DataLoader, ConcatDataset |
|
|
|
from toolkit import train_tools |
|
from toolkit.basic import value_map, adain, get_mean_std |
|
from toolkit.clip_vision_adapter import ClipVisionAdapter |
|
from toolkit.config_modules import GuidanceConfig |
|
from toolkit.data_loader import get_dataloader_datasets |
|
from toolkit.data_transfer_object.data_loader import DataLoaderBatchDTO, FileItemDTO |
|
from toolkit.guidance import get_targeted_guidance_loss, get_guidance_loss, GuidanceType |
|
from toolkit.image_utils import show_tensors, show_latents |
|
from toolkit.ip_adapter import IPAdapter |
|
from toolkit.custom_adapter import CustomAdapter |
|
from toolkit.prompt_utils import PromptEmbeds, concat_prompt_embeds |
|
from toolkit.reference_adapter import ReferenceAdapter |
|
from toolkit.stable_diffusion_model import StableDiffusion, BlankNetwork |
|
from toolkit.train_tools import get_torch_dtype, apply_snr_weight, add_all_snr_to_noise_scheduler, \ |
|
apply_learnable_snr_gos, LearnableSNRGamma |
|
import gc |
|
import torch |
|
from jobs.process import BaseSDTrainProcess |
|
from torchvision import transforms |
|
from diffusers.training_utils import EMAModel
|
import math |
|
from toolkit.train_tools import precondition_model_outputs_flow_match |
|
|
|
|
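# Release cached GPU memory and run Python garbage collection between heavy allocations.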
|
def flush(): |
|
torch.cuda.empty_cache() |
|
gc.collect() |
|
|
|
|
|
adapter_transforms = transforms.Compose([ |
|
transforms.ToTensor(), |
|
]) |
|
|
|
|
|
class SDTrainer(BaseSDTrainProcess): |
|
|
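    """Standard diffusion fine-tuning process.

    Handles batch preprocessing, prompt/adapter conditioning, noise prediction,
    prior and guided-loss variants, turbo (pixel-space) training, and the
    gradient-scaling / accumulation logic around the optimizer step.
    """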
|
def __init__(self, process_id: int, job, config: OrderedDict, **kwargs): |
|
super().__init__(process_id, job, config, **kwargs) |
|
        self.assistant_adapter: Union['T2IAdapter', 'ControlNetModel', None] = None
|
self.do_prior_prediction = False |
|
self.do_long_prompts = False |
|
self.do_guided_loss = False |
|
self.taesd: Optional[AutoencoderTiny] = None |
|
|
|
self._clip_image_embeds_unconditional: Union[List[str], None] = None |
|
self.negative_prompt_pool: Union[List[str], None] = None |
|
self.batch_negative_prompt: Union[List[str], None] = None |
|
|
|
self.scaler = torch.cuda.amp.GradScaler() |
|
|
|
        self.is_bfloat = self.train_config.dtype in ("bfloat16", "bf16")
|
|
|
self.do_grad_scale = True |
|
if self.is_fine_tuning: |
|
self.do_grad_scale = False |
|
if self.adapter_config is not None: |
|
if self.adapter_config.train: |
|
self.do_grad_scale = False |
|
|
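        # When training in fp16, monkey-patch GradScaler's private _unscale_grads_ so it is
        # always called with allow_fp16=True; the stock scaler refuses to unscale fp16 gradients.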
|
if self.train_config.dtype in ["fp16", "float16"]: |
|
|
|
org_unscale_grads = self.scaler._unscale_grads_ |
|
def _unscale_grads_replacer(optimizer, inv_scale, found_inf, allow_fp16): |
|
return org_unscale_grads(optimizer, inv_scale, found_inf, True) |
|
self.scaler._unscale_grads_ = _unscale_grads_replacer |
|
|
|
|
|
def before_model_load(self): |
|
pass |
|
|
|
def before_dataset_load(self): |
|
self.assistant_adapter = None |
|
|
|
if self.train_config.adapter_assist_name_or_path is not None: |
|
adapter_path = self.train_config.adapter_assist_name_or_path |
|
|
|
if self.train_config.adapter_assist_type == "t2i": |
|
|
|
self.assistant_adapter = T2IAdapter.from_pretrained( |
|
adapter_path, torch_dtype=get_torch_dtype(self.train_config.dtype) |
|
).to(self.device_torch) |
|
elif self.train_config.adapter_assist_type == "control_net": |
|
self.assistant_adapter = ControlNetModel.from_pretrained( |
|
adapter_path, torch_dtype=get_torch_dtype(self.train_config.dtype) |
|
).to(self.device_torch, dtype=get_torch_dtype(self.train_config.dtype)) |
|
else: |
|
raise ValueError(f"Unknown adapter assist type {self.train_config.adapter_assist_type}") |
|
|
|
self.assistant_adapter.eval() |
|
self.assistant_adapter.requires_grad_(False) |
|
flush() |
|
if self.train_config.train_turbo and self.train_config.show_turbo_outputs: |
|
if self.model_config.is_xl: |
|
self.taesd = AutoencoderTiny.from_pretrained("madebyollin/taesdxl", |
|
torch_dtype=get_torch_dtype(self.train_config.dtype)) |
|
else: |
|
self.taesd = AutoencoderTiny.from_pretrained("madebyollin/taesd", |
|
torch_dtype=get_torch_dtype(self.train_config.dtype)) |
|
self.taesd.to(dtype=get_torch_dtype(self.train_config.dtype), device=self.device_torch) |
|
self.taesd.eval() |
|
self.taesd.requires_grad_(False) |
|
|
|
def hook_before_train_loop(self): |
|
if self.train_config.do_prior_divergence: |
|
self.do_prior_prediction = True |
|
|
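        # Keep the VAE on the GPU only when latents are not pre-cached; otherwise offload it to CPU.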
|
if not self.is_latents_cached: |
|
self.sd.vae.eval() |
|
self.sd.vae.to(self.device_torch) |
|
else: |
|
|
|
self.sd.vae.to('cpu') |
|
flush() |
|
add_all_snr_to_noise_scheduler(self.sd.noise_scheduler, self.device_torch) |
|
if self.adapter is not None: |
|
self.adapter.to(self.device_torch) |
|
|
|
|
|
has_reg = self.datasets_reg is not None and len(self.datasets_reg) > 0 |
|
is_caching_clip_embeddings = self.datasets is not None and any([self.datasets[i].cache_clip_vision_to_disk for i in range(len(self.datasets))]) |
|
|
|
if has_reg and is_caching_clip_embeddings: |
|
|
|
unconditional_clip_image_embeds = [] |
|
datasets = get_dataloader_datasets(self.data_loader) |
|
for i in range(len(datasets)): |
|
unconditional_clip_image_embeds += datasets[i].clip_vision_unconditional_cache |
|
|
|
if len(unconditional_clip_image_embeds) == 0: |
|
raise ValueError("No unconditional clip image embeds found. This should not happen") |
|
|
|
self._clip_image_embeds_unconditional = unconditional_clip_image_embeds |
|
|
|
if self.train_config.negative_prompt is not None: |
|
if os.path.exists(self.train_config.negative_prompt): |
|
with open(self.train_config.negative_prompt, 'r') as f: |
|
self.negative_prompt_pool = f.readlines() |
|
|
|
self.negative_prompt_pool = [x.strip() for x in self.negative_prompt_pool if x.strip() != ""] |
|
else: |
|
|
|
self.negative_prompt_pool = [self.train_config.negative_prompt] |
|
|
|
def process_output_for_turbo(self, pred, noisy_latents, timesteps, noise, batch): |
|
|
|
|
|
|
|
|
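        # Turbo training: take a single scheduler step from the model's prediction to get a
        # denoised latent per sample, remove the residual noise left at a randomly chosen end
        # sigma, then decode with the VAE so the loss is computed in pixel space against the
        # original image tensor.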
|
batch_size = pred.shape[0] |
|
pred_chunks = torch.chunk(pred, batch_size, dim=0) |
|
noisy_latents_chunks = torch.chunk(noisy_latents, batch_size, dim=0) |
|
timesteps_chunks = torch.chunk(timesteps, batch_size, dim=0) |
|
latent_chunks = torch.chunk(batch.latents, batch_size, dim=0) |
|
noise_chunks = torch.chunk(noise, batch_size, dim=0) |
|
|
|
with torch.no_grad(): |
|
|
|
self.sd.noise_scheduler.set_timesteps( |
|
self.sd.noise_scheduler.config.num_train_timesteps, |
|
device=self.device_torch |
|
) |
|
train_timesteps = self.sd.noise_scheduler.timesteps.clone().detach() |
|
|
|
train_sigmas = self.sd.noise_scheduler.sigmas.clone().detach() |
|
|
|
|
|
self.sd.noise_scheduler.set_timesteps( |
|
1, |
|
device=self.device_torch |
|
) |
|
|
|
denoised_pred_chunks = [] |
|
target_pred_chunks = [] |
|
|
|
for i in range(batch_size): |
|
pred_item = pred_chunks[i] |
|
noisy_latents_item = noisy_latents_chunks[i] |
|
timesteps_item = timesteps_chunks[i] |
|
latents_item = latent_chunks[i] |
|
noise_item = noise_chunks[i] |
|
with torch.no_grad(): |
|
timestep_idx = [(train_timesteps == t).nonzero().item() for t in timesteps_item][0] |
|
single_step_timestep_schedule = [timesteps_item.squeeze().item()] |
|
|
|
sigmas = train_sigmas[timestep_idx:timestep_idx + 1].to(self.device_torch) |
|
|
|
end_sigma_idx = random.randint(timestep_idx, len(train_sigmas) - 1) |
|
end_sigma = train_sigmas[end_sigma_idx:end_sigma_idx + 1].to(self.device_torch) |
|
|
|
|
|
|
|
|
|
|
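                # Build a one-step schedule: start at this item's sigma and end at a randomly
                # chosen later sigma, so a varying amount of noise remains after the step.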
|
self.sd.noise_scheduler.sigmas = torch.cat([sigmas, end_sigma]).detach() |
|
|
|
self.sd.noise_scheduler.timesteps = torch.from_numpy( |
|
np.array(single_step_timestep_schedule, dtype=np.float32) |
|
).to(device=self.device_torch) |
|
|
|
|
|
self.sd.noise_scheduler._step_index = None |
|
|
|
denoised_latent = self.sd.noise_scheduler.step( |
|
pred_item, timesteps_item, noisy_latents_item.detach(), return_dict=False |
|
)[0] |
|
|
|
residual_noise = (noise_item * end_sigma.flatten()).detach().to(self.device_torch, dtype=get_torch_dtype( |
|
self.train_config.dtype)) |
|
|
|
denoised_latent = denoised_latent - residual_noise |
|
|
|
denoised_pred_chunks.append(denoised_latent) |
|
|
|
denoised_latents = torch.cat(denoised_pred_chunks, dim=0) |
|
|
|
self.sd.noise_scheduler.set_timesteps( |
|
self.sd.noise_scheduler.config.num_train_timesteps, |
|
device=self.device_torch |
|
) |
|
|
|
output = denoised_latents / self.sd.vae.config['scaling_factor'] |
|
output = self.sd.vae.decode(output).sample |
|
|
|
if self.train_config.show_turbo_outputs: |
|
|
|
with torch.no_grad(): |
|
show_tensors(output) |
|
|
|
|
|
|
|
|
|
|
|
return output, batch.tensor.to(self.device_torch, dtype=get_torch_dtype(self.train_config.dtype)) |
|
|
|
|
|
def calculate_loss( |
|
self, |
|
noise_pred: torch.Tensor, |
|
noise: torch.Tensor, |
|
noisy_latents: torch.Tensor, |
|
timesteps: torch.Tensor, |
|
batch: 'DataLoaderBatchDTO', |
|
mask_multiplier: Union[torch.Tensor, float] = 1.0, |
|
prior_pred: Union[torch.Tensor, None] = None, |
|
**kwargs |
|
): |
|
loss_target = self.train_config.loss_target |
|
is_reg = any(batch.get_is_reg_list()) |
|
|
|
prior_mask_multiplier = None |
|
target_mask_multiplier = None |
|
dtype = get_torch_dtype(self.train_config.dtype) |
|
|
|
has_mask = batch.mask_tensor is not None |
|
|
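        # Build the regression target (and any norm/prior corrections) without tracking gradients.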
|
with torch.no_grad(): |
|
loss_multiplier = torch.tensor(batch.loss_multiplier_list).to(self.device_torch, dtype=torch.float32) |
|
|
|
if self.train_config.match_noise_norm: |
|
|
|
noise_norm = torch.linalg.vector_norm(noise, ord=2, dim=(1, 2, 3), keepdim=True) |
|
noise_pred_norm = torch.linalg.vector_norm(noise_pred, ord=2, dim=(1, 2, 3), keepdim=True) |
|
noise_pred = noise_pred * (noise_norm / noise_pred_norm) |
|
|
|
if self.train_config.pred_scaler != 1.0: |
|
noise_pred = noise_pred * self.train_config.pred_scaler |
|
|
|
target = None |
|
|
|
if self.train_config.target_noise_multiplier != 1.0: |
|
noise = noise * self.train_config.target_noise_multiplier |
|
|
|
if self.train_config.correct_pred_norm or (self.train_config.inverted_mask_prior and prior_pred is not None and has_mask): |
|
if self.train_config.correct_pred_norm and not is_reg: |
|
with torch.no_grad(): |
|
|
|
if prior_pred is not None: |
|
prior_mean = prior_pred.mean([2,3], keepdim=True) |
|
prior_std = prior_pred.std([2,3], keepdim=True) |
|
noise_mean = noise_pred.mean([2,3], keepdim=True) |
|
noise_std = noise_pred.std([2,3], keepdim=True) |
|
|
|
mean_adjust = prior_mean - noise_mean |
|
std_adjust = prior_std - noise_std |
|
|
|
mean_adjust = mean_adjust * self.train_config.correct_pred_norm_multiplier |
|
std_adjust = std_adjust * self.train_config.correct_pred_norm_multiplier |
|
|
|
target_mean = noise_mean + mean_adjust |
|
target_std = noise_std + std_adjust |
|
|
|
eps = 1e-5 |
|
|
|
noise = (noise - noise_mean) / (noise_std + eps) |
|
noise = noise * (target_std + eps) + target_mean |
|
noise = noise.detach() |
|
|
|
if self.train_config.inverted_mask_prior and prior_pred is not None and has_mask: |
|
assert not self.train_config.train_turbo |
|
with torch.no_grad(): |
|
|
|
stretched_mask_multiplier = value_map( |
|
mask_multiplier, |
|
batch.file_items[0].dataset_config.mask_min_value, |
|
1.0, |
|
0.0, |
|
1.0 |
|
) |
|
|
|
prior_mask_multiplier = 1.0 - stretched_mask_multiplier |
|
|
|
|
|
|
|
|
|
target = noise |
|
|
|
|
|
|
|
elif prior_pred is not None and not self.train_config.do_prior_divergence: |
|
assert not self.train_config.train_turbo |
|
|
|
target = prior_pred |
|
elif self.sd.prediction_type == 'v_prediction': |
|
|
|
target = self.sd.noise_scheduler.get_velocity(batch.tensor, noise, timesteps) |
|
|
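            # Flow matching regresses the velocity from data to noise: noise minus the clean latents.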
|
elif self.sd.is_flow_matching: |
|
target = (noise - batch.latents).detach() |
|
else: |
|
target = noise |
|
|
|
if target is None: |
|
target = noise |
|
|
|
pred = noise_pred |
|
|
|
if self.train_config.train_turbo: |
|
pred, target = self.process_output_for_turbo(pred, noisy_latents, timesteps, noise, batch) |
|
|
|
ignore_snr = False |
|
|
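        # 'source' / 'unaugmented' loss targets: reconstruct denoised latents from the prediction
        # using the batch sigmas and compare them to the clean (or unaugmented) latents,
        # weighted by 1 / sigma^2.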
|
if loss_target == 'source' or loss_target == 'unaugmented': |
|
assert not self.train_config.train_turbo |
|
|
|
if batch.sigmas is None: |
|
raise ValueError("Batch sigmas is None. This should not happen") |
|
|
|
|
|
denoised_latents = noise_pred * (-batch.sigmas) + noisy_latents |
|
weighing = batch.sigmas ** -2.0 |
|
if loss_target == 'source': |
|
|
|
target = batch.latents |
|
elif loss_target == 'unaugmented': |
|
|
|
|
|
with torch.no_grad(): |
|
unaugmented_latents = self.sd.encode_images(batch.unaugmented_tensor).to(self.device_torch, dtype=dtype) |
|
unaugmented_latents = unaugmented_latents * self.train_config.latent_multiplier |
|
target = unaugmented_latents.detach() |
|
|
|
|
|
if self.sd.noise_scheduler.config.prediction_type == "epsilon": |
|
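                # the latents are already the correct target for epsilon prediction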
target = target |
|
elif self.sd.noise_scheduler.config.prediction_type == "v_prediction": |
|
target = self.sd.noise_scheduler.get_velocity(target, noise, timesteps) |
|
else: |
|
raise ValueError(f"Unknown prediction type {self.sd.noise_scheduler.config.prediction_type}") |
|
|
|
|
|
loss_per_element = (weighing.float() * (denoised_latents.float() - target.float()) ** 2) |
|
loss = loss_per_element |
|
else: |
|
|
|
if self.train_config.loss_type == "mae": |
|
loss = torch.nn.functional.l1_loss(pred.float(), target.float(), reduction="none") |
|
else: |
|
loss = torch.nn.functional.mse_loss(pred.float(), target.float(), reduction="none") |
|
|
|
|
|
if self.sd.is_flow_matching and (self.train_config.linear_timesteps or self.train_config.linear_timesteps2): |
|
|
|
timestep_weight = self.sd.noise_scheduler.get_weights_for_timesteps( |
|
timesteps, |
|
v2=self.train_config.linear_timesteps2 |
|
).to(loss.device, dtype=loss.dtype) |
|
timestep_weight = timestep_weight.view(-1, 1, 1, 1).detach() |
|
loss = loss * timestep_weight |
|
|
|
if self.train_config.do_prior_divergence and prior_pred is not None: |
|
loss = loss + (torch.nn.functional.mse_loss(pred.float(), prior_pred.float(), reduction="none") * -1.0) |
|
|
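        # Pixel-space turbo loss: reduce the mask to its last channel(s) and resize it to the
        # decoded image resolution.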
|
if self.train_config.train_turbo: |
|
mask_multiplier = mask_multiplier[:, 3:, :, :] |
|
|
|
mask_multiplier = torch.nn.functional.interpolate(mask_multiplier, size=(pred.shape[2], pred.shape[3]), mode='nearest') |
|
|
|
|
|
loss = loss * mask_multiplier |
|
|
|
prior_loss = None |
|
if self.train_config.inverted_mask_prior and prior_pred is not None and prior_mask_multiplier is not None: |
|
assert not self.train_config.train_turbo |
|
if self.train_config.loss_type == "mae": |
|
prior_loss = torch.nn.functional.l1_loss(pred.float(), prior_pred.float(), reduction="none") |
|
else: |
|
prior_loss = torch.nn.functional.mse_loss(pred.float(), prior_pred.float(), reduction="none") |
|
|
|
prior_loss = prior_loss * prior_mask_multiplier * self.train_config.inverted_mask_prior_multiplier |
|
if torch.isnan(prior_loss).any(): |
|
print("Prior loss is nan") |
|
prior_loss = None |
|
else: |
|
prior_loss = prior_loss.mean([1, 2, 3]) |
|
|
|
|
|
|
|
loss = loss.mean([1, 2, 3]) |
|
|
|
loss = loss * loss_multiplier |
|
if prior_loss is not None: |
|
loss = loss + prior_loss |
|
|
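        # Timestep-dependent loss weighting (skipped for turbo): learnable SNR/gamma if enabled,
        # otherwise fixed (min-)SNR gamma weighting.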
|
if not self.train_config.train_turbo: |
|
if self.train_config.learnable_snr_gos: |
|
|
|
loss = apply_learnable_snr_gos(loss, timesteps, self.snr_gos) |
|
elif self.train_config.snr_gamma is not None and self.train_config.snr_gamma > 0.000001 and not ignore_snr: |
|
|
|
loss = apply_snr_weight(loss, timesteps, self.sd.noise_scheduler, self.train_config.snr_gamma, |
|
fixed=True) |
|
elif self.train_config.min_snr_gamma is not None and self.train_config.min_snr_gamma > 0.000001 and not ignore_snr: |
|
|
|
loss = apply_snr_weight(loss, timesteps, self.sd.noise_scheduler, self.train_config.min_snr_gamma) |
|
|
|
loss = loss.mean() |
|
|
|
|
|
if self.adapter is not None and hasattr(self.adapter, "additional_loss") and self.adapter.additional_loss is not None: |
|
|
|
loss = loss + self.adapter.additional_loss.mean() |
|
self.adapter.additional_loss = None |
|
|
|
if self.train_config.target_norm_std: |
|
|
|
pred_std = noise_pred.std([2, 3], keepdim=True) |
|
norm_std_loss = torch.abs(self.train_config.target_norm_std_value - pred_std).mean() |
|
loss = loss + norm_std_loss |
|
|
|
|
|
return loss |
|
|
|
def preprocess_batch(self, batch: 'DataLoaderBatchDTO'): |
|
return batch |
|
|
|
def get_guided_loss( |
|
self, |
|
noisy_latents: torch.Tensor, |
|
conditional_embeds: PromptEmbeds, |
|
match_adapter_assist: bool, |
|
network_weight_list: list, |
|
timesteps: torch.Tensor, |
|
pred_kwargs: dict, |
|
batch: 'DataLoaderBatchDTO', |
|
noise: torch.Tensor, |
|
unconditional_embeds: Optional[PromptEmbeds] = None, |
|
**kwargs |
|
): |
|
loss = get_guidance_loss( |
|
noisy_latents=noisy_latents, |
|
conditional_embeds=conditional_embeds, |
|
match_adapter_assist=match_adapter_assist, |
|
network_weight_list=network_weight_list, |
|
timesteps=timesteps, |
|
pred_kwargs=pred_kwargs, |
|
batch=batch, |
|
noise=noise, |
|
sd=self.sd, |
|
unconditional_embeds=unconditional_embeds, |
|
scaler=self.scaler, |
|
**kwargs |
|
) |
|
|
|
return loss |
|
|
|
def get_guided_loss_targeted_polarity( |
|
self, |
|
noisy_latents: torch.Tensor, |
|
conditional_embeds: PromptEmbeds, |
|
match_adapter_assist: bool, |
|
network_weight_list: list, |
|
timesteps: torch.Tensor, |
|
pred_kwargs: dict, |
|
batch: 'DataLoaderBatchDTO', |
|
noise: torch.Tensor, |
|
**kwargs |
|
): |
|
with torch.no_grad(): |
|
|
|
dtype = get_torch_dtype(self.train_config.dtype) |
|
|
|
conditional_latents = batch.latents.to(self.device_torch, dtype=dtype).detach() |
|
unconditional_latents = batch.unconditional_latents.to(self.device_torch, dtype=dtype).detach() |
|
|
|
mean_latents = (conditional_latents + unconditional_latents) / 2.0 |
|
|
|
unconditional_diff = (unconditional_latents - mean_latents) |
|
conditional_diff = (conditional_latents - mean_latents) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
conditional_noisy_latents = self.sd.add_noise( |
|
mean_latents, |
|
noise, |
|
timesteps |
|
).detach() |
|
|
|
unconditional_noisy_latents = self.sd.add_noise( |
|
mean_latents, |
|
noise, |
|
timesteps |
|
).detach() |
|
|
|
|
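            # Disable the trainable network and capture a baseline prediction so its effect
            # can be isolated from the guided predictions below.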
|
self.network.is_active = False |
|
self.sd.unet.eval() |
|
|
|
|
|
|
|
baseline_prediction = self.sd.predict_noise( |
|
latents=unconditional_noisy_latents.to(self.device_torch, dtype=dtype).detach(), |
|
conditional_embeddings=conditional_embeds.to(self.device_torch, dtype=dtype).detach(), |
|
timestep=timesteps, |
|
guidance_scale=1.0, |
|
**pred_kwargs |
|
).detach() |
|
|
|
|
|
cat_embeds = concat_prompt_embeds([conditional_embeds, conditional_embeds]) |
|
cat_latents = torch.cat([conditional_noisy_latents, conditional_noisy_latents], dim=0) |
|
cat_timesteps = torch.cat([timesteps, timesteps], dim=0) |
|
|
|
|
|
|
|
|
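            # Run the same conditional latents with the network at +2x and -2x strength; relative
            # to the baseline, the two predictions are regressed onto the unconditional and
            # conditional latent differentials respectively.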
|
negative_network_weights = [weight * -2.0 for weight in network_weight_list] |
|
positive_network_weights = [weight * 2.0 for weight in network_weight_list] |
|
cat_network_weight_list = positive_network_weights + negative_network_weights |
|
|
|
|
|
self.sd.unet.train() |
|
self.network.is_active = True |
|
|
|
self.network.multiplier = cat_network_weight_list |
|
|
|
|
|
prediction = self.sd.predict_noise( |
|
latents=cat_latents.to(self.device_torch, dtype=dtype).detach(), |
|
conditional_embeddings=cat_embeds.to(self.device_torch, dtype=dtype).detach(), |
|
timestep=cat_timesteps, |
|
guidance_scale=1.0, |
|
**pred_kwargs |
|
) |
|
|
|
pred_pos, pred_neg = torch.chunk(prediction, 2, dim=0) |
|
|
|
pred_pos = pred_pos - baseline_prediction |
|
pred_neg = pred_neg - baseline_prediction |
|
|
|
pred_loss = torch.nn.functional.mse_loss( |
|
pred_pos.float(), |
|
unconditional_diff.float(), |
|
reduction="none" |
|
) |
|
pred_loss = pred_loss.mean([1, 2, 3]) |
|
|
|
pred_neg_loss = torch.nn.functional.mse_loss( |
|
pred_neg.float(), |
|
conditional_diff.float(), |
|
reduction="none" |
|
) |
|
pred_neg_loss = pred_neg_loss.mean([1, 2, 3]) |
|
|
|
loss = (pred_loss + pred_neg_loss) / 2.0 |
|
|
|
|
|
loss = loss.mean() |
|
loss.backward() |
|
|
|
|
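        # backward() has already been called above; return a detached tensor that still requires
        # grad so the outer training loop's backward pass is effectively a no-op.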
|
loss = loss.detach() |
|
loss.requires_grad_(True) |
|
|
|
return loss |
|
|
|
def get_guided_loss_masked_polarity( |
|
self, |
|
noisy_latents: torch.Tensor, |
|
conditional_embeds: PromptEmbeds, |
|
match_adapter_assist: bool, |
|
network_weight_list: list, |
|
timesteps: torch.Tensor, |
|
pred_kwargs: dict, |
|
batch: 'DataLoaderBatchDTO', |
|
noise: torch.Tensor, |
|
**kwargs |
|
): |
|
with torch.no_grad(): |
|
|
|
dtype = get_torch_dtype(self.train_config.dtype) |
|
|
|
conditional_latents = batch.latents.to(self.device_torch, dtype=dtype).detach() |
|
unconditional_latents = batch.unconditional_latents.to(self.device_torch, dtype=dtype).detach() |
|
inverse_latents = unconditional_latents - (conditional_latents - unconditional_latents) |
|
|
|
mean_latents = (conditional_latents + unconditional_latents) / 2.0 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
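            # Build a soft mask from the absolute conditional/unconditional latent difference,
            # normalized to [0, 1] and sharpened around spread_point.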
|
differential_mask = torch.abs(conditional_latents - unconditional_latents) |
|
max_differential = \ |
|
differential_mask.max(dim=1, keepdim=True)[0].max(dim=2, keepdim=True)[0].max(dim=3, keepdim=True)[0] |
|
differential_scaler = 1.0 / max_differential |
|
differential_mask = differential_mask * differential_scaler |
|
spread_point = 0.1 |
|
|
|
differential_mask = ((differential_mask - spread_point) * 10.0) + spread_point |
|
|
|
differential_mask = torch.clamp(differential_mask, 0.0, 1.0) |
|
|
|
|
|
|
|
conditional_noisy_latents = self.sd.add_noise( |
|
conditional_latents, |
|
noise, |
|
timesteps |
|
).detach() |
|
|
|
unconditional_noisy_latents = self.sd.add_noise( |
|
unconditional_latents, |
|
noise, |
|
timesteps |
|
).detach() |
|
|
|
inverse_noisy_latents = self.sd.add_noise( |
|
inverse_latents, |
|
noise, |
|
timesteps |
|
).detach() |
|
|
|
|
|
self.network.is_active = False |
|
self.sd.unet.eval() |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
cat_embeds = concat_prompt_embeds([conditional_embeds, conditional_embeds]) |
|
cat_latents = torch.cat([conditional_noisy_latents, unconditional_noisy_latents], dim=0) |
|
cat_timesteps = torch.cat([timesteps, timesteps], dim=0) |
|
|
|
|
|
|
|
|
|
negative_network_weights = [weight * -1.0 for weight in network_weight_list] |
|
positive_network_weights = [weight * 1.0 for weight in network_weight_list] |
|
cat_network_weight_list = positive_network_weights + negative_network_weights |
|
|
|
|
|
self.sd.unet.train() |
|
self.network.is_active = True |
|
|
|
self.network.multiplier = cat_network_weight_list |
|
|
|
|
|
prediction = self.sd.predict_noise( |
|
latents=cat_latents.to(self.device_torch, dtype=dtype).detach(), |
|
conditional_embeddings=cat_embeds.to(self.device_torch, dtype=dtype).detach(), |
|
timestep=cat_timesteps, |
|
guidance_scale=1.0, |
|
**pred_kwargs |
|
) |
|
|
|
pred_pos, pred_neg = torch.chunk(prediction, 2, dim=0) |
|
|
|
|
|
differential_mean_pred_loss = torch.abs(pred_pos - pred_neg).mean([1, 2, 3]) ** 2.0 |
|
|
|
|
|
|
|
|
|
pred_loss = torch.nn.functional.mse_loss( |
|
pred_pos.float(), |
|
noise.float(), |
|
reduction="none" |
|
) |
|
|
|
pred_loss = pred_loss * (1.0 + differential_mask) |
|
pred_loss = pred_loss.mean([1, 2, 3]) |
|
|
|
pred_neg_loss = torch.nn.functional.mse_loss( |
|
pred_neg.float(), |
|
noise.float(), |
|
reduction="none" |
|
) |
|
|
|
pred_neg_loss = pred_neg_loss * (1.0 - differential_mask) |
|
pred_neg_loss = pred_neg_loss.mean([1, 2, 3]) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
loss = pred_loss + pred_neg_loss |
|
|
|
|
|
loss = loss.mean() |
|
loss.backward() |
|
|
|
|
|
loss = loss.detach() |
|
loss.requires_grad_(True) |
|
|
|
return loss |
|
|
|
def get_prior_prediction( |
|
self, |
|
noisy_latents: torch.Tensor, |
|
conditional_embeds: PromptEmbeds, |
|
match_adapter_assist: bool, |
|
network_weight_list: list, |
|
timesteps: torch.Tensor, |
|
pred_kwargs: dict, |
|
batch: 'DataLoaderBatchDTO', |
|
noise: torch.Tensor, |
|
unconditional_embeds: Optional[PromptEmbeds] = None, |
|
conditioned_prompts=None, |
|
**kwargs |
|
): |
|
|
|
was_unet_training = self.sd.unet.training |
|
was_network_active = False |
|
if self.network is not None: |
|
was_network_active = self.network.is_active |
|
self.network.is_active = False |
|
can_disable_adapter = False |
|
was_adapter_active = False |
|
if self.adapter is not None and (isinstance(self.adapter, IPAdapter) or |
|
isinstance(self.adapter, ReferenceAdapter) or |
|
(isinstance(self.adapter, CustomAdapter)) |
|
): |
|
can_disable_adapter = True |
|
was_adapter_active = self.adapter.is_active |
|
self.adapter.is_active = False |
|
|
|
|
|
with torch.no_grad(): |
|
dtype = get_torch_dtype(self.train_config.dtype) |
|
|
|
embeds_to_use = conditional_embeds.clone().detach() |
|
|
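            # When a learned embedding or vision-adapter trigger appears in the captions, re-encode
            # the prompts with the trigger replaced by its plain class name so the prior prediction
            # is made without the learned concept.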
|
if (self.adapter is not None and isinstance(self.adapter, ClipVisionAdapter)) or self.embedding is not None: |
|
prompt_list = batch.get_caption_list() |
|
class_name = '' |
|
|
|
triggers = ['[trigger]', '[name]'] |
|
remove_tokens = [] |
|
|
|
if self.embed_config is not None: |
|
triggers.append(self.embed_config.trigger) |
|
for i in range(1, self.embed_config.tokens): |
|
remove_tokens.append(f"{self.embed_config.trigger}_{i}") |
|
if self.embed_config.trigger_class_name is not None: |
|
class_name = self.embed_config.trigger_class_name |
|
|
|
if self.adapter is not None: |
|
triggers.append(self.adapter_config.trigger) |
|
for i in range(1, self.adapter_config.num_tokens): |
|
remove_tokens.append(f"{self.adapter_config.trigger}_{i}") |
|
if self.adapter_config.trigger_class_name is not None: |
|
class_name = self.adapter_config.trigger_class_name |
|
|
|
for idx, prompt in enumerate(prompt_list): |
|
for remove_token in remove_tokens: |
|
prompt = prompt.replace(remove_token, '') |
|
for trigger in triggers: |
|
prompt = prompt.replace(trigger, class_name) |
|
prompt_list[idx] = prompt |
|
|
|
embeds_to_use = self.sd.encode_prompt( |
|
prompt_list, |
|
long_prompts=self.do_long_prompts).to( |
|
self.device_torch, |
|
dtype=dtype).detach() |
|
|
|
|
|
|
|
self.sd.unet.eval() |
|
|
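            # For IP adapters (except on Flux), strip the adapter's image tokens from the end of
            # the text embeddings before computing the prior.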
|
if self.adapter is not None and isinstance(self.adapter, IPAdapter) and not self.sd.is_flux: |
|
|
|
embeds_to_use: PromptEmbeds = embeds_to_use.clone().detach() |
|
end_pos = embeds_to_use.text_embeds.shape[1] - self.adapter_config.num_tokens |
|
embeds_to_use.text_embeds = embeds_to_use.text_embeds[:, :end_pos, :] |
|
if unconditional_embeds is not None: |
|
unconditional_embeds = unconditional_embeds.clone().detach() |
|
unconditional_embeds.text_embeds = unconditional_embeds.text_embeds[:, :end_pos] |
|
|
|
if unconditional_embeds is not None: |
|
unconditional_embeds = unconditional_embeds.to(self.device_torch, dtype=dtype).detach() |
|
|
|
prior_pred = self.sd.predict_noise( |
|
latents=noisy_latents.to(self.device_torch, dtype=dtype).detach(), |
|
conditional_embeddings=embeds_to_use.to(self.device_torch, dtype=dtype).detach(), |
|
unconditional_embeddings=unconditional_embeds, |
|
timestep=timesteps, |
|
guidance_scale=self.train_config.cfg_scale, |
|
rescale_cfg=self.train_config.cfg_rescale, |
|
**pred_kwargs |
|
) |
|
if was_unet_training: |
|
self.sd.unet.train() |
|
prior_pred = prior_pred.detach() |
|
|
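        # When matching the assist adapter, its residuals were only needed for the prior pass;
        # drop them so the trainable prediction runs without them.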
|
if match_adapter_assist and 'down_intrablock_additional_residuals' in pred_kwargs: |
|
del pred_kwargs['down_intrablock_additional_residuals'] |
|
if match_adapter_assist and 'down_block_additional_residuals' in pred_kwargs: |
|
del pred_kwargs['down_block_additional_residuals'] |
|
if match_adapter_assist and 'mid_block_additional_residual' in pred_kwargs: |
|
del pred_kwargs['mid_block_additional_residual'] |
|
|
|
if can_disable_adapter: |
|
self.adapter.is_active = was_adapter_active |
|
|
|
|
|
if self.network is not None: |
|
self.network.is_active = was_network_active |
|
return prior_pred |
|
|
|
def before_unet_predict(self): |
|
pass |
|
|
|
def after_unet_predict(self): |
|
pass |
|
|
|
def end_of_training_loop(self): |
|
pass |
|
|
|
def predict_noise( |
|
self, |
|
noisy_latents: torch.Tensor, |
|
timesteps: Union[int, torch.Tensor] = 1, |
|
conditional_embeds: Union[PromptEmbeds, None] = None, |
|
unconditional_embeds: Union[PromptEmbeds, None] = None, |
|
**kwargs, |
|
): |
|
dtype = get_torch_dtype(self.train_config.dtype) |
|
return self.sd.predict_noise( |
|
latents=noisy_latents.to(self.device_torch, dtype=dtype), |
|
conditional_embeddings=conditional_embeds.to(self.device_torch, dtype=dtype), |
|
unconditional_embeddings=unconditional_embeds, |
|
timestep=timesteps, |
|
guidance_scale=self.train_config.cfg_scale, |
|
detach_unconditional=False, |
|
rescale_cfg=self.train_config.cfg_rescale, |
|
**kwargs |
|
) |
|
|
|
def train_single_accumulation(self, batch: DataLoaderBatchDTO): |
|
self.timer.start('preprocess_batch') |
|
batch = self.preprocess_batch(batch) |
|
dtype = get_torch_dtype(self.train_config.dtype) |
|
|
|
if self.sd.vae.dtype != self.sd.vae_torch_dtype: |
|
self.sd.vae = self.sd.vae.to(self.sd.vae_torch_dtype) |
|
if isinstance(self.sd.text_encoder, list): |
|
for encoder in self.sd.text_encoder: |
|
if encoder.dtype != self.sd.te_torch_dtype: |
|
encoder.to(self.sd.te_torch_dtype) |
|
else: |
|
if self.sd.text_encoder.dtype != self.sd.te_torch_dtype: |
|
self.sd.text_encoder.to(self.sd.te_torch_dtype) |
|
|
|
noisy_latents, noise, timesteps, conditioned_prompts, imgs = self.process_general_training_batch(batch) |
|
if self.train_config.do_cfg or self.train_config.do_random_cfg: |
|
|
|
if self.negative_prompt_pool is not None: |
|
negative_prompts = [] |
|
for i in range(noisy_latents.shape[0]): |
|
num_neg = random.randint(1, self.train_config.max_negative_prompts) |
|
this_neg_prompts = [random.choice(self.negative_prompt_pool) for _ in range(num_neg)] |
|
this_neg_prompt = ', '.join(this_neg_prompts) |
|
negative_prompts.append(this_neg_prompt) |
|
self.batch_negative_prompt = negative_prompts |
|
else: |
|
self.batch_negative_prompt = ['' for _ in range(batch.latents.shape[0])] |
|
|
|
if self.adapter and isinstance(self.adapter, CustomAdapter): |
|
|
|
|
|
self.adapter.num_control_images = 1 |
|
conditioned_prompts = self.adapter.condition_prompt(conditioned_prompts) |
|
|
|
network_weight_list = batch.get_network_weight_list() |
|
if self.train_config.single_item_batching: |
|
network_weight_list = network_weight_list + network_weight_list |
|
|
|
has_adapter_img = batch.control_tensor is not None |
|
has_clip_image = batch.clip_image_tensor is not None |
|
has_clip_image_embeds = batch.clip_image_embeds is not None |
|
|
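        # Regularization images have no paired clip image; fall back to cached unconditional clip
        # embeds when available, otherwise treat them as having a blank clip image.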
|
if any([batch.file_items[idx].is_reg for idx in range(len(batch.file_items))]): |
|
has_clip_image = True |
|
if self._clip_image_embeds_unconditional is not None: |
|
has_clip_image_embeds = True |
|
has_clip_image = False |
|
|
|
if self.adapter is not None and isinstance(self.adapter, IPAdapter) and not has_clip_image and has_adapter_img: |
|
raise ValueError( |
|
"IPAdapter control image is now 'clip_image_path' instead of 'control_path'. Please update your dataset config ") |
|
|
|
match_adapter_assist = False |
|
|
|
|
|
if self.assistant_adapter: |
|
if self.train_config.match_adapter_chance == 1.0: |
|
match_adapter_assist = True |
|
elif self.train_config.match_adapter_chance > 0.0: |
|
match_adapter_assist = torch.rand( |
|
(1,), device=self.device_torch, dtype=dtype |
|
) < self.train_config.match_adapter_chance |
|
|
|
self.timer.stop('preprocess_batch') |
|
|
|
is_reg = False |
|
with torch.no_grad(): |
|
loss_multiplier = torch.ones((noisy_latents.shape[0], 1, 1, 1), device=self.device_torch, dtype=dtype) |
|
for idx, file_item in enumerate(batch.file_items): |
|
if file_item.is_reg: |
|
loss_multiplier[idx] = loss_multiplier[idx] * self.train_config.reg_weight |
|
is_reg = True |
|
|
|
adapter_images = None |
|
sigmas = None |
|
if has_adapter_img and (self.adapter or self.assistant_adapter): |
|
with self.timer('get_adapter_images'): |
|
|
|
if batch.control_tensor is not None: |
|
adapter_images = batch.control_tensor.to(self.device_torch, dtype=dtype).detach() |
|
|
|
if self.assistant_adapter is not None: |
|
in_channels = self.assistant_adapter.config.in_channels |
|
if adapter_images.shape[1] != in_channels: |
|
|
|
adapter_images = adapter_images[:, :in_channels, :, :] |
|
else: |
|
                    raise NotImplementedError("Adapter images must now be loaded with the dataloader")
|
|
|
clip_images = None |
|
if has_clip_image: |
|
with self.timer('get_clip_images'): |
|
|
|
if batch.clip_image_tensor is not None: |
|
clip_images = batch.clip_image_tensor.to(self.device_torch, dtype=dtype).detach() |
|
|
|
mask_multiplier = torch.ones((noisy_latents.shape[0], 1, 1, 1), device=self.device_torch, dtype=dtype) |
|
if batch.mask_tensor is not None: |
|
with self.timer('get_mask_multiplier'): |
|
|
|
mask_multiplier = batch.mask_tensor.to(self.device_torch, dtype=torch.float16).detach() |
|
|
|
mask_multiplier = torch.nn.functional.interpolate( |
|
mask_multiplier, size=(noisy_latents.shape[2], noisy_latents.shape[3]) |
|
) |
|
|
|
mask_multiplier = mask_multiplier.expand(-1, noisy_latents.shape[1], -1, -1) |
|
mask_multiplier = mask_multiplier.to(self.device_torch, dtype=dtype).detach() |
|
|
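        # Conditioning strength for the control/assist adapter: fixed 1.0 for a trainable
        # T2IAdapter, 0.9-1.0 when matching the assist adapter, otherwise a random value in 0.5-1.1.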
|
def get_adapter_multiplier(): |
|
if self.adapter and isinstance(self.adapter, T2IAdapter): |
|
|
|
return 1.0 |
|
elif match_adapter_assist: |
|
|
|
adapter_strength_min = 0.9 |
|
adapter_strength_max = 1.0 |
|
else: |
|
|
|
|
|
|
|
adapter_strength_min = 0.5 |
|
adapter_strength_max = 1.1 |
|
|
|
adapter_conditioning_scale = torch.rand( |
|
(1,), device=self.device_torch, dtype=dtype |
|
) |
|
|
|
adapter_conditioning_scale = value_map( |
|
adapter_conditioning_scale, |
|
0.0, |
|
1.0, |
|
adapter_strength_min, |
|
adapter_strength_max |
|
) |
|
return adapter_conditioning_scale |
|
|
|
|
|
with self.timer('grad_setup'): |
|
|
|
|
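            # Gradients flow through the text encoder when it is trained directly, when a textual
            # embedding is being learned, or when an adapter hooks into the text encoder.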
|
grad_on_text_encoder = False |
|
if self.train_config.train_text_encoder: |
|
grad_on_text_encoder = True |
|
|
|
if self.embedding is not None: |
|
grad_on_text_encoder = True |
|
|
|
if self.adapter and isinstance(self.adapter, ClipVisionAdapter): |
|
grad_on_text_encoder = True |
|
|
|
if self.adapter_config and self.adapter_config.type == 'te_augmenter': |
|
grad_on_text_encoder = True |
|
|
|
|
|
if self.network is not None: |
|
network = self.network |
|
else: |
|
network = BlankNetwork() |
|
|
|
|
|
network.multiplier = network_weight_list |
|
|
|
|
|
|
|
prompts_1 = conditioned_prompts |
|
prompts_2 = None |
|
if self.train_config.short_and_long_captions_encoder_split and self.sd.is_xl: |
|
prompts_1 = batch.get_caption_short_list() |
|
prompts_2 = conditioned_prompts |
|
|
|
|
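        # Single-item batching: split every tensor into per-sample chunks and run the forward pass
        # and loss once per sample; otherwise the whole batch goes through in one pass.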
|
if self.train_config.single_item_batching: |
|
if self.model_config.refiner_name_or_path is not None: |
|
raise ValueError("Single item batching is not supported when training the refiner") |
|
batch_size = noisy_latents.shape[0] |
|
|
|
noisy_latents_list = torch.chunk(noisy_latents, batch_size, dim=0) |
|
noise_list = torch.chunk(noise, batch_size, dim=0) |
|
timesteps_list = torch.chunk(timesteps, batch_size, dim=0) |
|
conditioned_prompts_list = [[prompt] for prompt in prompts_1] |
|
if imgs is not None: |
|
imgs_list = torch.chunk(imgs, batch_size, dim=0) |
|
else: |
|
imgs_list = [None for _ in range(batch_size)] |
|
if adapter_images is not None: |
|
adapter_images_list = torch.chunk(adapter_images, batch_size, dim=0) |
|
else: |
|
adapter_images_list = [None for _ in range(batch_size)] |
|
if clip_images is not None: |
|
clip_images_list = torch.chunk(clip_images, batch_size, dim=0) |
|
else: |
|
clip_images_list = [None for _ in range(batch_size)] |
|
mask_multiplier_list = torch.chunk(mask_multiplier, batch_size, dim=0) |
|
if prompts_2 is None: |
|
prompt_2_list = [None for _ in range(batch_size)] |
|
else: |
|
prompt_2_list = [[prompt] for prompt in prompts_2] |
|
|
|
else: |
|
noisy_latents_list = [noisy_latents] |
|
noise_list = [noise] |
|
timesteps_list = [timesteps] |
|
conditioned_prompts_list = [prompts_1] |
|
imgs_list = [imgs] |
|
adapter_images_list = [adapter_images] |
|
clip_images_list = [clip_images] |
|
mask_multiplier_list = [mask_multiplier] |
|
if prompts_2 is None: |
|
prompt_2_list = [None] |
|
else: |
|
prompt_2_list = [prompts_2] |
|
|
|
for noisy_latents, noise, timesteps, conditioned_prompts, imgs, adapter_images, clip_images, mask_multiplier, prompt_2 in zip( |
|
noisy_latents_list, |
|
noise_list, |
|
timesteps_list, |
|
conditioned_prompts_list, |
|
imgs_list, |
|
adapter_images_list, |
|
clip_images_list, |
|
mask_multiplier_list, |
|
prompt_2_list |
|
): |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
            with network:
|
|
|
if self.adapter and isinstance(self.adapter, ClipVisionAdapter): |
|
with self.timer('encode_clip_vision_embeds'): |
|
if has_clip_image: |
|
conditional_clip_embeds = self.adapter.get_clip_image_embeds_from_tensors( |
|
clip_images.detach().to(self.device_torch, dtype=dtype), |
|
is_training=True, |
|
has_been_preprocessed=True |
|
) |
|
else: |
|
|
|
conditional_clip_embeds = self.adapter.get_clip_image_embeds_from_tensors( |
|
torch.zeros( |
|
(noisy_latents.shape[0], 3, 512, 512), |
|
device=self.device_torch, dtype=dtype |
|
), |
|
is_training=True, |
|
has_been_preprocessed=True, |
|
drop=True |
|
) |
|
|
|
self.adapter(conditional_clip_embeds) |
|
|
|
|
|
if self.adapter and isinstance(self.adapter, CustomAdapter) and has_clip_image: |
|
quad_count = random.randint(1, 4) |
|
self.adapter.train() |
|
self.adapter.trigger_pre_te( |
|
tensors_0_1=clip_images if not is_reg else None, |
|
is_training=True, |
|
has_been_preprocessed=True, |
|
quad_count=quad_count, |
|
batch_size=noisy_latents.shape[0] |
|
) |
|
|
|
with self.timer('encode_prompt'): |
|
unconditional_embeds = None |
|
if grad_on_text_encoder: |
|
with torch.set_grad_enabled(True): |
|
if isinstance(self.adapter, CustomAdapter): |
|
self.adapter.is_unconditional_run = False |
|
conditional_embeds = self.sd.encode_prompt( |
|
conditioned_prompts, prompt_2, |
|
dropout_prob=self.train_config.prompt_dropout_prob, |
|
long_prompts=self.do_long_prompts).to( |
|
self.device_torch, |
|
dtype=dtype) |
|
|
|
if self.train_config.do_cfg: |
|
if isinstance(self.adapter, CustomAdapter): |
|
self.adapter.is_unconditional_run = True |
|
|
|
unconditional_embeds = self.sd.encode_prompt( |
|
self.batch_negative_prompt, |
|
self.batch_negative_prompt, |
|
dropout_prob=self.train_config.prompt_dropout_prob, |
|
long_prompts=self.do_long_prompts).to( |
|
self.device_torch, |
|
dtype=dtype) |
|
if isinstance(self.adapter, CustomAdapter): |
|
self.adapter.is_unconditional_run = False |
|
else: |
|
with torch.set_grad_enabled(False): |
|
|
|
if isinstance(self.sd.text_encoder, list): |
|
for te in self.sd.text_encoder: |
|
te.eval() |
|
else: |
|
self.sd.text_encoder.eval() |
|
if isinstance(self.adapter, CustomAdapter): |
|
self.adapter.is_unconditional_run = False |
|
conditional_embeds = self.sd.encode_prompt( |
|
conditioned_prompts, prompt_2, |
|
dropout_prob=self.train_config.prompt_dropout_prob, |
|
long_prompts=self.do_long_prompts).to( |
|
self.device_torch, |
|
dtype=dtype) |
|
if self.train_config.do_cfg: |
|
if isinstance(self.adapter, CustomAdapter): |
|
self.adapter.is_unconditional_run = True |
|
unconditional_embeds = self.sd.encode_prompt( |
|
self.batch_negative_prompt, |
|
dropout_prob=self.train_config.prompt_dropout_prob, |
|
long_prompts=self.do_long_prompts).to( |
|
self.device_torch, |
|
dtype=dtype) |
|
if isinstance(self.adapter, CustomAdapter): |
|
self.adapter.is_unconditional_run = False |
|
|
|
|
|
conditional_embeds = conditional_embeds.detach() |
|
if self.train_config.do_cfg: |
|
unconditional_embeds = unconditional_embeds.detach() |
|
|
|
|
|
pred_kwargs = {} |
|
|
|
if has_adapter_img: |
|
if (self.adapter and isinstance(self.adapter, T2IAdapter)) or ( |
|
self.assistant_adapter and isinstance(self.assistant_adapter, T2IAdapter)): |
|
with torch.set_grad_enabled(self.adapter is not None): |
|
adapter = self.assistant_adapter if self.assistant_adapter is not None else self.adapter |
|
adapter_multiplier = get_adapter_multiplier() |
|
with self.timer('encode_adapter'): |
|
down_block_additional_residuals = adapter(adapter_images) |
|
if self.assistant_adapter: |
|
|
|
down_block_additional_residuals = [ |
|
sample.to(dtype=dtype).detach() * adapter_multiplier for sample in |
|
down_block_additional_residuals |
|
] |
|
else: |
|
down_block_additional_residuals = [ |
|
sample.to(dtype=dtype) * adapter_multiplier for sample in |
|
down_block_additional_residuals |
|
] |
|
|
|
pred_kwargs['down_intrablock_additional_residuals'] = down_block_additional_residuals |
|
|
|
if self.adapter and isinstance(self.adapter, IPAdapter): |
|
with self.timer('encode_adapter_embeds'): |
|
|
|
quad_count = random.randint(1, 4) |
|
image_size = self.adapter.input_size |
|
if has_clip_image_embeds: |
|
|
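                            # Cached clip-vision embeds: reg batches sample random unconditional
                            # embeds, normal batches use the embeds cached for their own images.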
|
if is_reg: |
|
|
|
embeds = [ |
|
load_file(random.choice(batch.clip_image_embeds_unconditional)) for i in |
|
range(noisy_latents.shape[0]) |
|
] |
|
conditional_clip_embeds = self.adapter.parse_clip_image_embeds_from_cache( |
|
embeds, |
|
quad_count=quad_count |
|
) |
|
|
|
if self.train_config.do_cfg: |
|
embeds = [ |
|
load_file(random.choice(batch.clip_image_embeds_unconditional)) for i in |
|
range(noisy_latents.shape[0]) |
|
] |
|
unconditional_clip_embeds = self.adapter.parse_clip_image_embeds_from_cache( |
|
embeds, |
|
quad_count=quad_count |
|
) |
|
|
|
else: |
|
conditional_clip_embeds = self.adapter.parse_clip_image_embeds_from_cache( |
|
batch.clip_image_embeds, |
|
quad_count=quad_count |
|
) |
|
if self.train_config.do_cfg: |
|
unconditional_clip_embeds = self.adapter.parse_clip_image_embeds_from_cache( |
|
batch.clip_image_embeds_unconditional, |
|
quad_count=quad_count |
|
) |
|
elif is_reg: |
|
|
|
clip_images = torch.zeros( |
|
(noisy_latents.shape[0], 3, image_size, image_size), |
|
device=self.device_torch, dtype=dtype |
|
).detach() |
|
|
|
conditional_clip_embeds = self.adapter.get_clip_image_embeds_from_tensors( |
|
clip_images, |
|
drop=True, |
|
is_training=True, |
|
has_been_preprocessed=False, |
|
quad_count=quad_count |
|
) |
|
if self.train_config.do_cfg: |
|
unconditional_clip_embeds = self.adapter.get_clip_image_embeds_from_tensors( |
|
torch.zeros( |
|
(noisy_latents.shape[0], 3, image_size, image_size), |
|
device=self.device_torch, dtype=dtype |
|
).detach(), |
|
is_training=True, |
|
drop=True, |
|
has_been_preprocessed=False, |
|
quad_count=quad_count |
|
) |
|
elif has_clip_image: |
|
conditional_clip_embeds = self.adapter.get_clip_image_embeds_from_tensors( |
|
clip_images.detach().to(self.device_torch, dtype=dtype), |
|
is_training=True, |
|
has_been_preprocessed=True, |
|
quad_count=quad_count, |
|
|
|
|
|
|
|
) |
|
if self.train_config.do_cfg: |
|
unconditional_clip_embeds = self.adapter.get_clip_image_embeds_from_tensors( |
|
clip_images.detach().to(self.device_torch, dtype=dtype), |
|
is_training=True, |
|
drop=True, |
|
has_been_preprocessed=True, |
|
quad_count=quad_count |
|
) |
|
else: |
|
print("No Clip Image") |
|
print([file_item.path for file_item in batch.file_items]) |
|
raise ValueError("Could not find clip image") |
|
|
|
if not self.adapter_config.train_image_encoder: |
|
|
|
conditional_clip_embeds = conditional_clip_embeds.detach() |
|
if self.train_config.do_cfg: |
|
unconditional_clip_embeds = unconditional_clip_embeds.detach() |
|
|
|
with self.timer('encode_adapter'): |
|
self.adapter.train() |
|
conditional_embeds = self.adapter( |
|
conditional_embeds.detach(), |
|
conditional_clip_embeds, |
|
is_unconditional=False |
|
) |
|
if self.train_config.do_cfg: |
|
unconditional_embeds = self.adapter( |
|
unconditional_embeds.detach(), |
|
unconditional_clip_embeds, |
|
is_unconditional=True |
|
) |
|
else: |
|
|
|
self.adapter.last_unconditional = None |
|
|
|
if self.adapter and isinstance(self.adapter, ReferenceAdapter): |
|
|
|
                    self.adapter.noise_scheduler = self.sd.noise_scheduler  # the reference adapter needs the diffusion noise scheduler
|
if has_clip_image or has_adapter_img: |
|
img_to_use = clip_images if has_clip_image else adapter_images |
|
|
|
reference_images = ((img_to_use - 0.5) * 2).detach().to(self.device_torch, dtype=dtype) |
|
self.adapter.set_reference_images(reference_images) |
|
self.adapter.noise_scheduler = self.sd.noise_scheduler |
|
elif is_reg: |
|
self.adapter.set_blank_reference_images(noisy_latents.shape[0]) |
|
else: |
|
self.adapter.set_reference_images(None) |
|
|
|
prior_pred = None |
|
|
|
do_reg_prior = False |
|
|
|
|
|
|
|
|
|
do_inverted_masked_prior = False |
|
if self.train_config.inverted_mask_prior and batch.mask_tensor is not None: |
|
do_inverted_masked_prior = True |
|
|
|
do_correct_pred_norm_prior = self.train_config.correct_pred_norm |
|
|
|
do_guidance_prior = False |
|
|
|
if batch.unconditional_latents is not None: |
|
|
|
guidance_type: GuidanceType = batch.file_items[0].dataset_config.guidance_type |
|
if guidance_type == 'tnt': |
|
do_guidance_prior = True |
|
|
|
if (( |
|
has_adapter_img and self.assistant_adapter and match_adapter_assist) or self.do_prior_prediction or do_guidance_prior or do_reg_prior or do_inverted_masked_prior or self.train_config.correct_pred_norm): |
|
with self.timer('prior predict'): |
|
prior_pred = self.get_prior_prediction( |
|
noisy_latents=noisy_latents, |
|
conditional_embeds=conditional_embeds, |
|
match_adapter_assist=match_adapter_assist, |
|
network_weight_list=network_weight_list, |
|
timesteps=timesteps, |
|
pred_kwargs=pred_kwargs, |
|
noise=noise, |
|
batch=batch, |
|
unconditional_embeds=unconditional_embeds, |
|
conditioned_prompts=conditioned_prompts |
|
) |
|
if prior_pred is not None: |
|
prior_pred = prior_pred.detach() |
|
|
|
|
|
if self.adapter and isinstance(self.adapter, CustomAdapter) and has_clip_image: |
|
quad_count = random.randint(1, 4) |
|
self.adapter.train() |
|
conditional_embeds = self.adapter.condition_encoded_embeds( |
|
tensors_0_1=clip_images, |
|
prompt_embeds=conditional_embeds, |
|
is_training=True, |
|
has_been_preprocessed=True, |
|
quad_count=quad_count |
|
) |
|
if self.train_config.do_cfg and unconditional_embeds is not None: |
|
unconditional_embeds = self.adapter.condition_encoded_embeds( |
|
tensors_0_1=clip_images, |
|
prompt_embeds=unconditional_embeds, |
|
is_training=True, |
|
has_been_preprocessed=True, |
|
is_unconditional=True, |
|
quad_count=quad_count |
|
) |
|
|
|
if self.adapter and isinstance(self.adapter, CustomAdapter) and batch.extra_values is not None: |
|
self.adapter.add_extra_values(batch.extra_values.detach()) |
|
|
|
if self.train_config.do_cfg: |
|
self.adapter.add_extra_values(torch.zeros_like(batch.extra_values.detach()), |
|
is_unconditional=True) |
|
|
|
if has_adapter_img: |
|
if (self.adapter and isinstance(self.adapter, ControlNetModel)) or ( |
|
self.assistant_adapter and isinstance(self.assistant_adapter, ControlNetModel)): |
|
if self.train_config.do_cfg: |
|
raise ValueError("ControlNetModel is not supported with CFG") |
|
with torch.set_grad_enabled(self.adapter is not None): |
|
adapter: ControlNetModel = self.assistant_adapter if self.assistant_adapter is not None else self.adapter |
|
adapter_multiplier = get_adapter_multiplier() |
|
with self.timer('encode_adapter'): |
|
|
|
added_cond_kwargs = {} |
|
if self.sd.is_xl: |
|
added_cond_kwargs["text_embeds"] = conditional_embeds.pooled_embeds |
|
added_cond_kwargs['time_ids'] = self.sd.get_time_ids_from_latents(noisy_latents) |
|
down_block_res_samples, mid_block_res_sample = adapter( |
|
noisy_latents, |
|
timesteps, |
|
encoder_hidden_states=conditional_embeds.text_embeds, |
|
controlnet_cond=adapter_images, |
|
conditioning_scale=1.0, |
|
guess_mode=False, |
|
added_cond_kwargs=added_cond_kwargs, |
|
return_dict=False, |
|
) |
|
pred_kwargs['down_block_additional_residuals'] = down_block_res_samples |
|
pred_kwargs['mid_block_additional_residual'] = mid_block_res_sample |
|
|
|
self.before_unet_predict() |
|
|
|
if batch.unconditional_latents is not None or self.do_guided_loss: |
|
|
|
loss = self.get_guided_loss( |
|
noisy_latents=noisy_latents, |
|
conditional_embeds=conditional_embeds, |
|
match_adapter_assist=match_adapter_assist, |
|
network_weight_list=network_weight_list, |
|
timesteps=timesteps, |
|
pred_kwargs=pred_kwargs, |
|
batch=batch, |
|
noise=noise, |
|
unconditional_embeds=unconditional_embeds, |
|
mask_multiplier=mask_multiplier, |
|
prior_pred=prior_pred, |
|
) |
|
|
|
else: |
|
with self.timer('predict_unet'): |
|
if unconditional_embeds is not None: |
|
unconditional_embeds = unconditional_embeds.to(self.device_torch, dtype=dtype).detach() |
|
noise_pred = self.predict_noise( |
|
noisy_latents=noisy_latents.to(self.device_torch, dtype=dtype), |
|
timesteps=timesteps, |
|
conditional_embeds=conditional_embeds.to(self.device_torch, dtype=dtype), |
|
unconditional_embeds=unconditional_embeds, |
|
**pred_kwargs |
|
) |
|
self.after_unet_predict() |
|
|
|
with self.timer('calculate_loss'): |
|
noise = noise.to(self.device_torch, dtype=dtype).detach() |
|
loss = self.calculate_loss( |
|
noise_pred=noise_pred, |
|
noise=noise, |
|
noisy_latents=noisy_latents, |
|
timesteps=timesteps, |
|
batch=batch, |
|
mask_multiplier=mask_multiplier, |
|
prior_pred=prior_pred, |
|
) |
|
|
|
if torch.isnan(loss): |
|
print("loss is nan") |
|
loss = torch.zeros_like(loss).requires_grad_(True) |
|
|
|
with self.timer('backward'): |
|
|
|
loss = loss * loss_multiplier.mean() |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if not self.do_grad_scale: |
|
loss.backward() |
|
else: |
|
self.scaler.scale(loss).backward() |
|
|
|
return loss.detach() |
|
|
|
|
|
def hook_train_loop(self, batch: Union[DataLoaderBatchDTO, List[DataLoaderBatchDTO]]): |
|
if isinstance(batch, list): |
|
batch_list = batch |
|
else: |
|
batch_list = [batch] |
|
total_loss = None |
|
self.optimizer.zero_grad() |
|
for batch in batch_list: |
|
loss = self.train_single_accumulation(batch) |
|
if total_loss is None: |
|
total_loss = loss |
|
else: |
|
total_loss += loss |
|
if len(batch_list) > 1 and self.model_config.low_vram: |
|
torch.cuda.empty_cache() |
|
|
|
|
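        # Step the optimizer only on the final accumulation step; unscale (when grad scaling) and
        # clip gradients first, except for adafactor which manages its own scaling.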
|
if not self.is_grad_accumulation_step: |
|
|
|
if self.train_config.optimizer != 'adafactor': |
|
if self.do_grad_scale: |
|
self.scaler.unscale_(self.optimizer) |
|
if isinstance(self.params[0], dict): |
|
for i in range(len(self.params)): |
|
torch.nn.utils.clip_grad_norm_(self.params[i]['params'], self.train_config.max_grad_norm) |
|
else: |
|
torch.nn.utils.clip_grad_norm_(self.params, self.train_config.max_grad_norm) |
|
|
|
with self.timer('optimizer_step'): |
|
|
|
if not self.do_grad_scale: |
|
self.optimizer.step() |
|
else: |
|
self.scaler.step(self.optimizer) |
|
self.scaler.update() |
|
|
|
self.optimizer.zero_grad(set_to_none=True) |
|
if self.adapter and isinstance(self.adapter, CustomAdapter): |
|
self.adapter.post_weight_update() |
|
if self.ema is not None: |
|
with self.timer('ema_update'): |
|
self.ema.update() |
|
else: |
|
|
|
pass |
|
|
|
|
|
with self.timer('scheduler_step'): |
|
self.lr_scheduler.step() |
|
|
|
if self.embedding is not None: |
|
with self.timer('restore_embeddings'): |
|
|
|
self.embedding.restore_embeddings() |
|
if self.adapter is not None and isinstance(self.adapter, ClipVisionAdapter): |
|
with self.timer('restore_adapter'): |
|
|
|
self.adapter.restore_embeddings() |
|
|
|
        loss_dict = OrderedDict(
            {'loss': (total_loss / len(batch_list)).item()}
        )
|
|
|
self.end_of_training_loop() |
|
|
|
return loss_dict |
|
|