import copy
import torch
from ldm_patched.modules.model_patcher import ModelPatcher
from ldm_patched.modules.sample import convert_cond
from ldm_patched.modules.samplers import encode_model_conds


class UnetPatcher(ModelPatcher):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.controlnet_linked_list = None
        self.extra_preserved_memory_during_sampling = 0
        self.extra_model_patchers_during_sampling = []

    def clone(self):
        n = UnetPatcher(self.model, self.load_device, self.offload_device, self.size, self.current_device,
                        weight_inplace_update=self.weight_inplace_update)
        n.patches = {}
        for k in self.patches:
            n.patches[k] = self.patches[k][:]

        n.object_patches = self.object_patches.copy()
        n.model_options = copy.deepcopy(self.model_options)
        n.model_keys = self.model_keys
        n.controlnet_linked_list = self.controlnet_linked_list
        n.extra_preserved_memory_during_sampling = self.extra_preserved_memory_during_sampling
        n.extra_model_patchers_during_sampling = self.extra_model_patchers_during_sampling.copy()
        return n

    def add_extra_preserved_memory_during_sampling(self, memory_in_bytes: int):
        # Use this to ask Forge to preserve a certain amount of memory during sampling.
        # For example, if GPU VRAM is 8 GB and memory_in_bytes is 2 GB (memory_in_bytes = 2 * 1024 * 1024 * 1024),
        # then sampling will always use less than 6 GB of memory, dynamically offloading modules to CPU RAM as needed.
        # You can estimate the size of any PyTorch model with model_management.module_size(any_pytorch_model).
        self.extra_preserved_memory_during_sampling += memory_in_bytes
        return
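
    # A minimal usage sketch (assuming a script with access to `p.sd_model.forge_objects`,
    # as in the add_alphas_cumprod_modifier docstring below):
    #
    #   unet = p.sd_model.forge_objects.unet.clone()
    #   unet.add_extra_preserved_memory_during_sampling(2 * 1024 * 1024 * 1024)  # reserve 2 GB
    #   p.sd_model.forge_objects.unet = unet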

    def add_extra_model_patcher_during_sampling(self, model_patcher: ModelPatcher):
        # Use this to ask Forge to move extra model patchers to GPU during sampling.
        # Forge's memory management will load and offload these patchers automatically,
        # together with the UNet itself.
        self.extra_model_patchers_during_sampling.append(model_patcher)
        return
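
    # A minimal usage sketch (`some_patcher` is illustrative; any existing ModelPatcher works):
    #
    #   unet.add_extra_model_patcher_during_sampling(some_patcher)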

    def add_extra_torch_module_during_sampling(self, m: torch.nn.Module, cast_to_unet_dtype: bool = True):
        # Use this method to bind an extra torch.nn.Module to this UNet during sampling.
        # The module `m` will be delegated to Forge's memory management system:
        # it will be loaded to GPU every time sampling starts, and unloaded when necessary.
        # `m` will also influence Forge's judgement of available GPU memory, and thus its
        # decision whether to offload modules so the user can run larger batch sizes.
        # Set cast_to_unet_dtype=True if you want `m` to have the same dtype as the UNet during sampling.
        if cast_to_unet_dtype:
            m.to(self.model.diffusion_model.dtype)

        patcher = ModelPatcher(model=m, load_device=self.load_device, offload_device=self.offload_device)
        self.add_extra_model_patcher_during_sampling(patcher)
        return patcher
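
    # A minimal usage sketch (the Linear module below is a hypothetical example;
    # any torch.nn.Module can be bound this way). Internally this just wraps `m`
    # in a ModelPatcher and hands it to add_extra_model_patcher_during_sampling:
    #
    #   unet = p.sd_model.forge_objects.unet.clone()
    #   extra_proj = torch.nn.Linear(768, 768)  # hypothetical auxiliary module
    #   patcher = unet.add_extra_torch_module_during_sampling(extra_proj)
    #   p.sd_model.forge_objects.unet = unet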

    def add_patched_controlnet(self, cnet):
        # ControlNets are kept as a singly linked list: each newly added ControlNet
        # points back to the previous head via set_previous_controlnet.
        cnet.set_previous_controlnet(self.controlnet_linked_list)
        self.controlnet_linked_list = cnet
        return

    def list_controlnets(self):
        # Walk the linked list from the most recently added ControlNet to the oldest.
        results = []
        pointer = self.controlnet_linked_list
        while pointer is not None:
            results.append(pointer)
            pointer = pointer.previous_controlnet
        return results

    def append_model_option(self, k, v, ensure_uniqueness=False):
        if k not in self.model_options:
            self.model_options[k] = []

        if ensure_uniqueness and v in self.model_options[k]:
            return

        self.model_options[k].append(v)
        return

    def append_transformer_option(self, k, v, ensure_uniqueness=False):
        if 'transformer_options' not in self.model_options:
            self.model_options['transformer_options'] = {}

        to = self.model_options['transformer_options']

        if k not in to:
            to[k] = []

        if ensure_uniqueness and v in to[k]:
            return

        to[k].append(v)
        return

    def add_conditioning_modifier(self, modifier, ensure_uniqueness=False):
        self.append_model_option('conditioning_modifiers', modifier, ensure_uniqueness)
        return

    def add_alphas_cumprod_modifier(self, modifier, ensure_uniqueness=False):
        """
        For some reason, this method only works in A1111's Script.process_batch(self, p, *args, **kwargs).

        For example, below is a working modification:

            class ExampleScript(scripts.Script):
                def process_batch(self, p, *args, **kwargs):
                    unet = p.sd_model.forge_objects.unet.clone()

                    def modifier(x):
                        return x ** 0.5

                    unet.add_alphas_cumprod_modifier(modifier)
                    p.sd_model.forge_objects.unet = unet
                    return

        add_alphas_cumprod_modifier is the only patch option that should be used in process_batch();
        all other patch options should be called in process_before_every_sampling().
        """
        self.append_model_option('alphas_cumprod_modifiers', modifier, ensure_uniqueness)
        return

    def add_block_modifier(self, modifier, ensure_uniqueness=False):
        self.append_transformer_option('block_modifiers', modifier, ensure_uniqueness)
        return

    def add_block_inner_modifier(self, modifier, ensure_uniqueness=False):
        self.append_transformer_option('block_inner_modifiers', modifier, ensure_uniqueness)
        return

    def add_controlnet_conditioning_modifier(self, modifier, ensure_uniqueness=False):
        self.append_transformer_option('controlnet_conditioning_modifiers', modifier, ensure_uniqueness)
        return

    def set_model_replace_all(self, patch, target="attn1"):
        # Brute-force over all plausible block names and indices; 16 is an upper
        # bound that covers the block numbers and transformer depths of current
        # SD UNets, so every matching location receives the patch.
        for block_name in ['input', 'middle', 'output']:
            for number in range(16):
                for transformer_index in range(16):
                    self.set_model_patch_replace(patch, target, block_name, number, transformer_index)
        return

    def encode_conds_after_clip(self, conds, noise, prompt_type="positive"):
        return encode_model_conds(
            model_function=self.model.extra_conds,
            conds=convert_cond(conds),
            noise=noise,
            device=noise.device,
            prompt_type=prompt_type
        )
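
    # A minimal usage sketch (names are illustrative: `clip_output` stands for the
    # conditioning produced by the CLIP stage, `noise` for the sampler's noise tensor):
    #
    #   unet = p.sd_model.forge_objects.unet
    #   positive_cond = unet.encode_conds_after_clip(clip_output, noise, prompt_type="positive")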