Cloned from Stable-X/stable-normal-v0-1
- .gitattributes +1 -0
- README.md +4 -0
- controlnet/config.json +57 -0
- controlnet/controlnetvae.py +250 -0
- controlnet/diffusion_pytorch_model.bin +3 -0
- controlnet/diffusion_pytorch_model.fp16.safetensors +3 -0
- dino_controlnet/config.json +66 -0
- dino_controlnet/diffusion_pytorch_model.bin +3 -0
- dino_controlnet/diffusion_pytorch_model.fp16.safetensors +3 -0
- dino_controlnet/dino_controlnetvae.py +403 -0
- model_index.json +42 -0
- scheduler/scheduler_config.json +19 -0
- text_encoder/config.json +25 -0
- text_encoder/model.fp16.safetensors +3 -0
- text_encoder/pytorch_model.bin +3 -0
- tokenizer/merges.txt +0 -0
- tokenizer/special_tokens_map.json +30 -0
- tokenizer/tokenizer_config.json +38 -0
- tokenizer/vocab.json +0 -0
- unet/config.json +73 -0
- unet/diffusion_pytorch_model.bin +3 -0
- unet/diffusion_pytorch_model.fp16.safetensors +3 -0
- vae/config.json +34 -0
- vae/diffusion_pytorch_model.bin +3 -0
- vae/diffusion_pytorch_model.fp16.safetensors +3 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.png filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,4 @@
+---
+license: apache-2.0
+library_name: diffusers
+---
controlnet/config.json ADDED
@@ -0,0 +1,57 @@
+{
+  "_class_name": "ControlNetVAEModel",
+  "_diffusers_version": "0.28.0",
+  "_name_or_path": "weights/stable-normal-v0-1/controlnet",
+  "act_fn": "silu",
+  "addition_embed_type": null,
+  "addition_embed_type_num_heads": 64,
+  "addition_time_embed_dim": null,
+  "attention_head_dim": [
+    5,
+    10,
+    20,
+    20
+  ],
+  "block_out_channels": [
+    320,
+    640,
+    1280,
+    1280
+  ],
+  "class_embed_type": null,
+  "conditioning_channels": 3,
+  "conditioning_embedding_out_channels": [
+    16,
+    32,
+    96,
+    256
+  ],
+  "controlnet_conditioning_channel_order": "rgb",
+  "cross_attention_dim": 1024,
+  "down_block_types": [
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "DownBlock2D"
+  ],
+  "downsample_padding": 1,
+  "encoder_hid_dim": null,
+  "encoder_hid_dim_type": null,
+  "flip_sin_to_cos": true,
+  "freq_shift": 0,
+  "global_pool_conditions": false,
+  "in_channels": 4,
+  "layers_per_block": 2,
+  "mid_block_scale_factor": 1,
+  "mid_block_type": "UNetMidBlock2DCrossAttn",
+  "norm_eps": 1e-05,
+  "norm_num_groups": 32,
+  "num_attention_heads": null,
+  "num_class_embeds": null,
+  "only_cross_attention": false,
+  "projection_class_embeddings_input_dim": null,
+  "resnet_time_scale_shift": "default",
+  "transformer_layers_per_block": 1,
+  "upcast_attention": true,
+  "use_linear_projection": true
+}
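For orientation: this is the standard SD 2.1-style ControlNet layout (cross-attention width 1024, upcast attention, linear projections) registered under the custom class from controlnet/controlnetvae.py below. A minimal loading sketch, assuming a local clone of this repository at ./stable-normal-v0-1 with controlnet/controlnetvae.py on the Python path (both are assumptions, not documented usage):

# Sketch only: "./stable-normal-v0-1" is an assumed local clone path.
import torch
from controlnetvae import ControlNetVAEModel  # class defined in controlnet/controlnetvae.py

controlnet = ControlNetVAEModel.from_pretrained(
    "./stable-normal-v0-1",
    subfolder="controlnet",
    torch_dtype=torch.float16,
    variant="fp16",  # picks up diffusion_pytorch_model.fp16.safetensors
)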
controlnet/controlnetvae.py ADDED
@@ -0,0 +1,250 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from dataclasses import dataclass
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+from diffusers.configuration_utils import ConfigMixin, register_to_config
+from diffusers.loaders.single_file_model import FromOriginalModelMixin
+from diffusers.utils import BaseOutput, logging
+from diffusers.models.attention_processor import (
+    ADDED_KV_ATTENTION_PROCESSORS,
+    CROSS_ATTENTION_PROCESSORS,
+    AttentionProcessor,
+    AttnAddedKVProcessor,
+    AttnProcessor,
+)
+from diffusers.models.embeddings import TextImageProjection, TextImageTimeEmbedding, TextTimeEmbedding, TimestepEmbedding, Timesteps
+from diffusers.models.modeling_utils import ModelMixin
+from diffusers.models.unets.unet_2d_blocks import (
+    CrossAttnDownBlock2D,
+    DownBlock2D,
+    UNetMidBlock2D,
+    UNetMidBlock2DCrossAttn,
+    get_down_block,
+)
+from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel
+from diffusers.models.controlnet import ControlNetOutput
+from diffusers.models import ControlNetModel
+
+
+class ControlNetVAEModel(ControlNetModel):
+    def forward(
+        self,
+        sample: torch.Tensor,
+        timestep: Union[torch.Tensor, float, int],
+        encoder_hidden_states: torch.Tensor,
+        controlnet_cond: torch.Tensor = None,
+        conditioning_scale: float = 1.0,
+        class_labels: Optional[torch.Tensor] = None,
+        timestep_cond: Optional[torch.Tensor] = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
+        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
+        guess_mode: bool = False,
+        return_dict: bool = True,
+    ) -> Union[ControlNetOutput, Tuple[Tuple[torch.Tensor, ...], torch.Tensor]]:
+        """
+        The [`ControlNetVAEModel`] forward method.
+
+        Args:
+            sample (`torch.Tensor`):
+                The noisy input tensor.
+            timestep (`Union[torch.Tensor, float, int]`):
+                The number of timesteps to denoise an input.
+            encoder_hidden_states (`torch.Tensor`):
+                The encoder hidden states.
+            controlnet_cond (`torch.Tensor`):
+                The conditional input tensor of shape `(batch_size, sequence_length, hidden_size)`.
+            conditioning_scale (`float`, defaults to `1.0`):
+                The scale factor for ControlNet outputs.
+            class_labels (`torch.Tensor`, *optional*, defaults to `None`):
+                Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
+            timestep_cond (`torch.Tensor`, *optional*, defaults to `None`):
+                Additional conditional embeddings for timestep. If provided, the embeddings will be summed with the
+                timestep_embedding passed through the `self.time_embedding` layer to obtain the final timestep
+                embeddings.
+            attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
+                An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
+                is kept, otherwise if `0` it is discarded. The mask will be converted into a bias, which adds large
+                negative values to the attention scores corresponding to "discard" tokens.
+            added_cond_kwargs (`dict`):
+                Additional conditions for the Stable Diffusion XL UNet.
+            cross_attention_kwargs (`dict[str]`, *optional*, defaults to `None`):
+                A kwargs dictionary that if specified is passed along to the `AttnProcessor`.
+            guess_mode (`bool`, defaults to `False`):
+                In this mode, the ControlNet encoder tries its best to recognize the content of the input even if
+                you remove all prompts. A `guidance_scale` between 3.0 and 5.0 is recommended.
+            return_dict (`bool`, defaults to `True`):
+                Whether or not to return a [`~models.controlnet.ControlNetOutput`] instead of a plain tuple.
+
+        Returns:
+            [`~models.controlnet.ControlNetOutput`] **or** `tuple`:
+                If `return_dict` is `True`, a [`~models.controlnet.ControlNetOutput`] is returned, otherwise a tuple is
+                returned where the first element is the sample tensor.
+        """
+        # check channel order
+        channel_order = self.config.controlnet_conditioning_channel_order
+
+        if channel_order == "rgb":
+            # in rgb order by default
+            ...
+        elif channel_order == "bgr":
+            controlnet_cond = torch.flip(controlnet_cond, dims=[1])
+        else:
+            raise ValueError(f"unknown `controlnet_conditioning_channel_order`: {channel_order}")
+
+        # prepare attention_mask
+        if attention_mask is not None:
+            attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
+            attention_mask = attention_mask.unsqueeze(1)
+
+        # 1. time
+        timesteps = timestep
+        if not torch.is_tensor(timesteps):
+            # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
+            # This would be a good case for the `match` statement (Python 3.10+)
+            is_mps = sample.device.type == "mps"
+            if isinstance(timestep, float):
+                dtype = torch.float32 if is_mps else torch.float64
+            else:
+                dtype = torch.int32 if is_mps else torch.int64
+            timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
+        elif len(timesteps.shape) == 0:
+            timesteps = timesteps[None].to(sample.device)
+
+        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
+        timesteps = timesteps.expand(sample.shape[0])
+
+        t_emb = self.time_proj(timesteps)
+
+        # timesteps does not contain any weights and will always return f32 tensors
+        # but time_embedding might actually be running in fp16. so we need to cast here.
+        # there might be better ways to encapsulate this.
+        t_emb = t_emb.to(dtype=sample.dtype)
+
+        emb = self.time_embedding(t_emb, timestep_cond)
+        aug_emb = None
+
+        if self.class_embedding is not None:
+            if class_labels is None:
+                raise ValueError("class_labels should be provided when num_class_embeds > 0")
+
+            if self.config.class_embed_type == "timestep":
+                class_labels = self.time_proj(class_labels)
+
+            class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)
+            emb = emb + class_emb
+
+        if self.config.addition_embed_type is not None:
+            if self.config.addition_embed_type == "text":
+                aug_emb = self.add_embedding(encoder_hidden_states)
+
+            elif self.config.addition_embed_type == "text_time":
+                if "text_embeds" not in added_cond_kwargs:
+                    raise ValueError(
+                        f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`"
+                    )
+                text_embeds = added_cond_kwargs.get("text_embeds")
+                if "time_ids" not in added_cond_kwargs:
+                    raise ValueError(
+                        f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`"
+                    )
+                time_ids = added_cond_kwargs.get("time_ids")
+                time_embeds = self.add_time_proj(time_ids.flatten())
+                time_embeds = time_embeds.reshape((text_embeds.shape[0], -1))
+
+                add_embeds = torch.concat([text_embeds, time_embeds], dim=-1)
+                add_embeds = add_embeds.to(emb.dtype)
+                aug_emb = self.add_embedding(add_embeds)
+
+        emb = emb + aug_emb if aug_emb is not None else emb
+
+        # 2. pre-process
+        sample = self.conv_in(sample)
+
+        # 3. down
+        down_block_res_samples = (sample,)
+        for downsample_block in self.down_blocks:
+            if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
+                sample, res_samples = downsample_block(
+                    hidden_states=sample,
+                    temb=emb,
+                    encoder_hidden_states=encoder_hidden_states,
+                    attention_mask=attention_mask,
+                    cross_attention_kwargs=cross_attention_kwargs,
+                )
+            else:
+                sample, res_samples = downsample_block(hidden_states=sample, temb=emb)
+
+            down_block_res_samples += res_samples
+
+        # 4. mid
+        if self.mid_block is not None:
+            if hasattr(self.mid_block, "has_cross_attention") and self.mid_block.has_cross_attention:
+                sample = self.mid_block(
+                    sample,
+                    emb,
+                    encoder_hidden_states=encoder_hidden_states,
+                    attention_mask=attention_mask,
+                    cross_attention_kwargs=cross_attention_kwargs,
+                )
+            else:
+                sample = self.mid_block(sample, emb)
+
+        # 5. ControlNet blocks
+        # NOTE: the controlnet down blocks are zero convs; they are deliberately
+        # discarded here, so the raw down-block residuals pass through unchanged.
+        controlnet_down_block_res_samples = ()
+        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
+            # intentionally do NOT apply controlnet_block (the zero conv)
+            controlnet_down_block_res_samples = controlnet_down_block_res_samples + (down_block_res_sample,)
+
+        down_block_res_samples = controlnet_down_block_res_samples
+
+        mid_block_res_sample = sample
+
+        # 6. scaling
+        if guess_mode and not self.config.global_pool_conditions:
+            scales = torch.logspace(-1, 0, len(down_block_res_samples) + 1, device=sample.device)  # 0.1 to 1.0
+            scales = scales * conditioning_scale
+            down_block_res_samples = [sample * scale for sample, scale in zip(down_block_res_samples, scales)]
+            mid_block_res_sample = mid_block_res_sample * scales[-1]  # last one
+        else:
+            down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
+            mid_block_res_sample = mid_block_res_sample * conditioning_scale
+
+        if self.config.global_pool_conditions:
+            down_block_res_samples = [
+                torch.mean(sample, dim=(2, 3), keepdim=True) for sample in down_block_res_samples
+            ]
+            mid_block_res_sample = torch.mean(mid_block_res_sample, dim=(2, 3), keepdim=True)
+
+        if not return_dict:
+            return (down_block_res_samples, mid_block_res_sample)
+
+        return ControlNetOutput(
+            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
+        )
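The only behavioral change relative to diffusers' stock ControlNetModel is step 5: the zero convolutions in controlnet_down_blocks are skipped, so the down-block residuals pass through unmodified. A shape-level sketch of one forward call (float32 on CPU for portability; assumes `controlnet` was loaded as in the earlier sketch):

# Sketch only: shapes assume SD 2.1 at 768x768 (4x96x96 latents, CLIP width 1024).
import torch

controlnet = controlnet.float()
latents = torch.randn(1, 4, 96, 96)
text_states = torch.randn(1, 77, 1024)

down_res, mid_res = controlnet(
    sample=latents,
    timestep=torch.tensor([500]),
    encoder_hidden_states=text_states,
    conditioning_scale=1.0,
    return_dict=False,
)
print(len(down_res), mid_res.shape)  # 12 down residuals; mid sample is (1, 1280, 12, 12)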
controlnet/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac4ca1eab421c7b80e8b80bd37f29b56116aecb63712ed68d469b7620c4ba14d
+size 1457051786
controlnet/diffusion_pytorch_model.fp16.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac412aa024ffbb0b1cbba7e935cbf8cd0eec59dc7f80ed6f1dfacebe669fb19d
+size 728496840
dino_controlnet/config.json ADDED
@@ -0,0 +1,66 @@
+{
+  "_class_name": "DINOControlNetVAEModel",
+  "_diffusers_version": "0.28.0",
+  "_name_or_path": "weights/stable-normal-v0-1/dino_controlnet",
+  "act_fn": "silu",
+  "addition_embed_type": null,
+  "addition_embed_type_num_heads": 64,
+  "addition_time_embed_dim": null,
+  "attention_head_dim": [
+    5,
+    10,
+    20,
+    20
+  ],
+  "block_out_channels": [
+    320,
+    640,
+    1280,
+    1280
+  ],
+  "class_embed_type": null,
+  "conditioning_channels": 3,
+  "conditioning_embedding_out_channels": [
+    16,
+    32,
+    96,
+    256
+  ],
+  "controlnet_conditioning_channel_order": "rgb",
+  "cross_attention_dim": 1024,
+  "dino_block_out_channels": [
+    512,
+    128,
+    256,
+    256
+  ],
+  "dino_conditioning_channels": 1024,
+  "dino_conditioning_embedding_channels": 320,
+  "dino_up_sampling": "transpose",
+  "down_block_types": [
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "DownBlock2D"
+  ],
+  "downsample_padding": 1,
+  "encoder_hid_dim": null,
+  "encoder_hid_dim_type": null,
+  "flip_sin_to_cos": true,
+  "freq_shift": 0,
+  "global_pool_conditions": false,
+  "in_channels": 4,
+  "layers_per_block": 2,
+  "mid_block_scale_factor": 1,
+  "mid_block_type": "UNetMidBlock2DCrossAttn",
+  "norm_eps": 1e-05,
+  "norm_num_groups": 32,
+  "num_attention_heads": null,
+  "num_class_embeds": null,
+  "only_cross_attention": false,
+  "projection_class_embeddings_input_dim": null,
+  "resnet_time_scale_shift": "default",
+  "transformer_layers_per_block": 1,
+  "upcast_attention": true,
+  "use_linear_projection": true
+}
dino_controlnet/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a9ef3dca5d6264c8dad5c0adff5100649dddfe3f39d2829743dc4397efc20668
+size 1499469162
dino_controlnet/diffusion_pytorch_model.fp16.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:97a89293403c71b33376a941b98fdb8be5c3cb42ad3b6ce0f34c0fb74a3b0a42
+size 749704376
dino_controlnet/dino_controlnetvae.py ADDED
@@ -0,0 +1,403 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from dataclasses import dataclass
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+from diffusers.configuration_utils import ConfigMixin, register_to_config
+from diffusers.loaders.single_file_model import FromOriginalModelMixin
+from diffusers.utils import BaseOutput, logging
+from diffusers.models.attention_processor import (
+    ADDED_KV_ATTENTION_PROCESSORS,
+    CROSS_ATTENTION_PROCESSORS,
+    AttentionProcessor,
+    AttnAddedKVProcessor,
+    AttnProcessor,
+)
+from diffusers.models.embeddings import TextImageProjection, TextImageTimeEmbedding, TextTimeEmbedding, TimestepEmbedding, Timesteps
+from diffusers.models.modeling_utils import ModelMixin
+from diffusers.models.unets.unet_2d_blocks import (
+    CrossAttnDownBlock2D,
+    DownBlock2D,
+    UNetMidBlock2D,
+    UNetMidBlock2DCrossAttn,
+    get_down_block,
+)
+from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel
+from diffusers.models.controlnet import ControlNetOutput
+from diffusers.models import ControlNetModel
+
+
+def conv_nd(dims, *args, **kwargs):
+    """
+    Create a 1D, 2D, or 3D convolution module.
+    """
+    if dims == 1:
+        return nn.Conv1d(*args, **kwargs)
+    elif dims == 2:
+        return nn.Conv2d(*args, **kwargs)
+    elif dims == 3:
+        return nn.Conv3d(*args, **kwargs)
+    raise ValueError(f"unsupported dimensions: {dims}")
+
+
+def zero_module(module):
+    """
+    Zero out the parameters of a module and return it.
+    """
+    for p in module.parameters():
+        p.detach().zero_()
+    return module
+
+
+class DINOControlNetConditioningEmbedding(nn.Module):
+    def __init__(
+        self,
+        conditioning_embedding_channels: int,
+        conditioning_channels: int = 3,
+        block_out_channels=(16, 32, 64, 128),
+        up_sampling="transpose",
+    ):
+        super().__init__()
+
+        self.conv_in = conv_nd(
+            2, conditioning_channels, block_out_channels[0], kernel_size=3, padding=1
+        )
+
+        self.blocks = nn.ModuleList([])
+
+        for i in range(len(block_out_channels) - 1):
+            channel_in = block_out_channels[i]
+            channel_out = block_out_channels[i + 1]
+            self.blocks.append(
+                conv_nd(2, channel_in, channel_in, kernel_size=3, padding=1)
+            )
+            self.blocks.append(
+                conv_nd(
+                    2, channel_in, channel_out, kernel_size=3, padding=1, stride=1
+                )
+            )
+
+        if up_sampling == "transpose":
+            # 2x spatial upsampling; zero-initialized so the branch starts as a no-op
+            self.conv_out = zero_module(
+                nn.ConvTranspose2d(
+                    in_channels=block_out_channels[-1],
+                    out_channels=conditioning_embedding_channels,
+                    kernel_size=4,
+                    stride=2,
+                    padding=1,
+                )
+            )
+        else:
+            self.conv_out = zero_module(conv_nd(2, block_out_channels[-1], conditioning_embedding_channels, 3, padding=1))
+
+    def forward(self, conditioning):
+        embedding = self.conv_in(conditioning)
+        embedding = F.silu(embedding)
+
+        for block in self.blocks:
+            embedding = block(embedding)
+            embedding = F.silu(embedding)
+
+        embedding = self.conv_out(embedding)
+
+        return embedding
+
+
+class DINOControlNetVAEModel(ControlNetModel):
+    @register_to_config
+    def __init__(
+        self,
+        in_channels: int = 4,
+        conditioning_channels: int = 3,
+        flip_sin_to_cos: bool = True,
+        freq_shift: int = 0,
+        down_block_types: Tuple[str, ...] = (
+            "CrossAttnDownBlock2D",
+            "CrossAttnDownBlock2D",
+            "CrossAttnDownBlock2D",
+            "DownBlock2D",
+        ),
+        mid_block_type: Optional[str] = "UNetMidBlock2DCrossAttn",
+        only_cross_attention: Union[bool, Tuple[bool]] = False,
+        block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280),
+        layers_per_block: int = 2,
+        downsample_padding: int = 1,
+        mid_block_scale_factor: float = 1,
+        act_fn: str = "silu",
+        norm_num_groups: Optional[int] = 32,
+        norm_eps: float = 1e-5,
+        cross_attention_dim: int = 1280,
+        transformer_layers_per_block: Union[int, Tuple[int, ...]] = 1,
+        encoder_hid_dim: Optional[int] = None,
+        encoder_hid_dim_type: Optional[str] = None,
+        attention_head_dim: Union[int, Tuple[int, ...]] = 8,
+        num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None,
+        use_linear_projection: bool = False,
+        class_embed_type: Optional[str] = None,
+        addition_embed_type: Optional[str] = None,
+        addition_time_embed_dim: Optional[int] = None,
+        num_class_embeds: Optional[int] = None,
+        upcast_attention: bool = False,
+        resnet_time_scale_shift: str = "default",
+        projection_class_embeddings_input_dim: Optional[int] = None,
+        controlnet_conditioning_channel_order: str = "rgb",
+        conditioning_embedding_out_channels: Optional[Tuple[int, ...]] = (16, 32, 96, 256),
+        global_pool_conditions: bool = False,
+        addition_embed_type_num_heads: int = 64,
+        dino_up_sampling="transpose",
+        dino_conditioning_embedding_channels=320,
+        dino_conditioning_channels=1024,
+        dino_block_out_channels=[512, 128, 256, 256],
+    ):
+        super().__init__(
+            in_channels,
+            conditioning_channels,
+            flip_sin_to_cos,
+            freq_shift,
+            down_block_types,
+            mid_block_type,
+            only_cross_attention,
+            block_out_channels,
+            layers_per_block,
+            downsample_padding,
+            mid_block_scale_factor,
+            act_fn,
+            norm_num_groups,
+            norm_eps,
+            cross_attention_dim,
+            transformer_layers_per_block,
+            encoder_hid_dim,
+            encoder_hid_dim_type,
+            attention_head_dim,
+            num_attention_heads,
+            use_linear_projection,
+            class_embed_type,
+            addition_embed_type,
+            addition_time_embed_dim,
+            num_class_embeds,
+            upcast_attention,
+            resnet_time_scale_shift,
+            projection_class_embeddings_input_dim,
+            controlnet_conditioning_channel_order,
+            conditioning_embedding_out_channels,
+            global_pool_conditions,
+            addition_embed_type_num_heads,
+        )
+
+        # dino controlnet embeddings
+        self.dino_controlnet_cond_embedding = DINOControlNetConditioningEmbedding(
+            up_sampling=dino_up_sampling,
+            conditioning_embedding_channels=dino_conditioning_embedding_channels,
+            conditioning_channels=dino_conditioning_channels,
+            block_out_channels=dino_block_out_channels,
+        )
+
+    def forward(
+        self,
+        sample: torch.Tensor,
+        timestep: Union[torch.Tensor, float, int],
+        encoder_hidden_states: torch.Tensor,
+        controlnet_cond: torch.Tensor = None,
+        conditioning_scale: float = 1.0,
+        class_labels: Optional[torch.Tensor] = None,
+        timestep_cond: Optional[torch.Tensor] = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
+        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
+        guess_mode: bool = False,
+        return_dict: bool = True,
+    ) -> Union[ControlNetOutput, Tuple[Tuple[torch.Tensor, ...], torch.Tensor]]:
+        """
+        The [`DINOControlNetVAEModel`] forward method.
+
+        Args:
+            sample (`torch.Tensor`):
+                The noisy input tensor.
+            timestep (`Union[torch.Tensor, float, int]`):
+                The number of timesteps to denoise an input.
+            encoder_hidden_states (`torch.Tensor`):
+                The encoder hidden states.
+            controlnet_cond (`torch.Tensor`):
+                The conditional input tensor of shape `(batch_size, sequence_length, hidden_size)`.
+            conditioning_scale (`float`, defaults to `1.0`):
+                The scale factor for ControlNet outputs.
+            class_labels (`torch.Tensor`, *optional*, defaults to `None`):
+                Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
+            timestep_cond (`torch.Tensor`, *optional*, defaults to `None`):
+                Additional conditional embeddings for timestep. If provided, the embeddings will be summed with the
+                timestep_embedding passed through the `self.time_embedding` layer to obtain the final timestep
+                embeddings.
+            attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
+                An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
+                is kept, otherwise if `0` it is discarded. The mask will be converted into a bias, which adds large
+                negative values to the attention scores corresponding to "discard" tokens.
+            added_cond_kwargs (`dict`):
+                Additional conditions for the Stable Diffusion XL UNet.
+            cross_attention_kwargs (`dict[str]`, *optional*, defaults to `None`):
+                A kwargs dictionary that if specified is passed along to the `AttnProcessor`.
+            guess_mode (`bool`, defaults to `False`):
+                In this mode, the ControlNet encoder tries its best to recognize the content of the input even if
+                you remove all prompts. A `guidance_scale` between 3.0 and 5.0 is recommended.
+            return_dict (`bool`, defaults to `True`):
+                Whether or not to return a [`~models.controlnet.ControlNetOutput`] instead of a plain tuple.
+
+        Returns:
+            [`~models.controlnet.ControlNetOutput`] **or** `tuple`:
+                If `return_dict` is `True`, a [`~models.controlnet.ControlNetOutput`] is returned, otherwise a tuple is
+                returned where the first element is the sample tensor.
+        """
+        # check channel order
+        channel_order = self.config.controlnet_conditioning_channel_order
+
+        if channel_order == "rgb":
+            # in rgb order by default
+            ...
+        elif channel_order == "bgr":
+            controlnet_cond = torch.flip(controlnet_cond, dims=[1])
+        else:
+            raise ValueError(f"unknown `controlnet_conditioning_channel_order`: {channel_order}")
+
+        # prepare attention_mask
+        if attention_mask is not None:
+            attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
+            attention_mask = attention_mask.unsqueeze(1)
+
+        # 1. time
+        timesteps = timestep
+        if not torch.is_tensor(timesteps):
+            # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
+            # This would be a good case for the `match` statement (Python 3.10+)
+            is_mps = sample.device.type == "mps"
+            if isinstance(timestep, float):
+                dtype = torch.float32 if is_mps else torch.float64
+            else:
+                dtype = torch.int32 if is_mps else torch.int64
+            timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
+        elif len(timesteps.shape) == 0:
+            timesteps = timesteps[None].to(sample.device)
+
+        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
+        timesteps = timesteps.expand(sample.shape[0])
+
+        t_emb = self.time_proj(timesteps)
+
+        # timesteps does not contain any weights and will always return f32 tensors
+        # but time_embedding might actually be running in fp16. so we need to cast here.
+        # there might be better ways to encapsulate this.
+        t_emb = t_emb.to(dtype=sample.dtype)
+
+        emb = self.time_embedding(t_emb, timestep_cond)
+        aug_emb = None
+
+        if self.class_embedding is not None:
+            if class_labels is None:
+                raise ValueError("class_labels should be provided when num_class_embeds > 0")
+
+            if self.config.class_embed_type == "timestep":
+                class_labels = self.time_proj(class_labels)
+
+            class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)
+            emb = emb + class_emb
+
+        if self.config.addition_embed_type is not None:
+            if self.config.addition_embed_type == "text":
+                aug_emb = self.add_embedding(encoder_hidden_states)
+
+            elif self.config.addition_embed_type == "text_time":
+                if "text_embeds" not in added_cond_kwargs:
+                    raise ValueError(
+                        f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`"
+                    )
+                text_embeds = added_cond_kwargs.get("text_embeds")
+                if "time_ids" not in added_cond_kwargs:
+                    raise ValueError(
+                        f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`"
+                    )
+                time_ids = added_cond_kwargs.get("time_ids")
+                time_embeds = self.add_time_proj(time_ids.flatten())
+                time_embeds = time_embeds.reshape((text_embeds.shape[0], -1))
+
+                add_embeds = torch.concat([text_embeds, time_embeds], dim=-1)
+                add_embeds = add_embeds.to(emb.dtype)
+                aug_emb = self.add_embedding(add_embeds)
+
+        emb = emb + aug_emb if aug_emb is not None else emb
+
+        # 2. pre-process
+        # sample = self.conv_in(sample)  # without input_blocks[0]: the DINO branch
+        # receives features already projected to the conv_in width, so conv_in is skipped
+
+        # 3. down
+        down_block_res_samples = (sample,)
+        for downsample_block in self.down_blocks:
+            if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
+                sample, res_samples = downsample_block(
+                    hidden_states=sample,
+                    temb=emb,
+                    encoder_hidden_states=encoder_hidden_states,
+                    attention_mask=attention_mask,
+                    cross_attention_kwargs=cross_attention_kwargs,
+                )
+            else:
+                sample, res_samples = downsample_block(hidden_states=sample, temb=emb)
+
+            down_block_res_samples += res_samples
+
+        # 5. ControlNet blocks (no mid block is run in this variant)
+        # the first residual (the DINO feature map) passes through without a zero conv
+        controlnet_down_block_res_samples = (down_block_res_samples[0],)
+
+        for down_block_res_sample, controlnet_block in zip(down_block_res_samples[1:], self.controlnet_down_blocks[1:]):
+            down_block_res_sample = controlnet_block(down_block_res_sample)
+            controlnet_down_block_res_samples = controlnet_down_block_res_samples + (down_block_res_sample,)
+
+        down_block_res_samples = controlnet_down_block_res_samples
+
+        mid_block_res_sample = None
+
+        # 6. scaling
+        if guess_mode and not self.config.global_pool_conditions:
+            scales = torch.logspace(-1, 0, len(down_block_res_samples) + 1, device=sample.device)  # 0.1 to 1.0
+            scales = scales * conditioning_scale
+            down_block_res_samples = [sample * scale for sample, scale in zip(down_block_res_samples, scales)]
+            if mid_block_res_sample is not None:
+                mid_block_res_sample = mid_block_res_sample * scales[-1]  # last one
+        else:
+            down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
+
+        if self.config.global_pool_conditions:
+            down_block_res_samples = [
+                torch.mean(sample, dim=(2, 3), keepdim=True) for sample in down_block_res_samples
+            ]
+
+        if not return_dict:
+            return (down_block_res_samples, mid_block_res_sample)
+
+        return ControlNetOutput(
+            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
+        )
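The conditioning embedding projects 1024-channel DINO feature maps down through the dino_block_out_channels stack and then upsamples 2x into the UNet's 320-channel conv_in space, with a zero-initialized output conv. A shape-check sketch, assuming dino_controlnetvae.py is importable and a 48x48 feature grid (the grid size is an assumption for illustration):

# Sketch only: verifies the embedding's channel/spatial behavior with this repo's config.
import torch
from dino_controlnetvae import DINOControlNetConditioningEmbedding

embed = DINOControlNetConditioningEmbedding(
    conditioning_embedding_channels=320,
    conditioning_channels=1024,
    block_out_channels=(512, 128, 256, 256),
    up_sampling="transpose",
)
feats = torch.randn(1, 1024, 48, 48)  # e.g. DINO patch features reshaped to a grid
out = embed(feats)
print(out.shape)  # torch.Size([1, 320, 96, 96]); all zeros at init (zeroed conv_out)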
model_index.json ADDED
@@ -0,0 +1,42 @@
+{
+  "_class_name": "StableNormalPipeline",
+  "_diffusers_version": "0.28.0",
+  "_name_or_path": "weights/stable-normal-v0-1",
+  "controlnet": [
+    "controlnetvae",
+    "ControlNetVAEModel"
+  ],
+  "dino_controlnet": [
+    "dino_controlnetvae",
+    "DINOControlNetVAEModel"
+  ],
+  "feature_extractor": [
+    null,
+    null
+  ],
+  "image_encoder": [
+    null,
+    null
+  ],
+  "requires_safety_checker": true,
+  "safety_checker": [
+    null,
+    null
+  ],
+  "text_encoder": [
+    "transformers",
+    "CLIPTextModel"
+  ],
+  "tokenizer": [
+    "transformers",
+    "CLIPTokenizer"
+  ],
+  "unet": [
+    "diffusers",
+    "UNet2DConditionModel"
+  ],
+  "vae": [
+    "diffusers",
+    "AutoencoderKL"
+  ]
+}
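StableNormalPipeline is a custom pipeline class that does not ship with diffusers, so this model_index.json is not loadable with a bare DiffusionPipeline.from_pretrained call. A hypothetical sketch using diffusers' custom_pipeline hook, assuming the pipeline implementation has been obtained from the Stable-X project and saved locally (both paths below are assumptions):

# Sketch only: "./stable-normal-v0-1" is an assumed local clone of this repo and
# "./stablenormal_pipeline" an assumed local directory holding the pipeline code.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "./stable-normal-v0-1",
    custom_pipeline="./stablenormal_pipeline",
    torch_dtype=torch.float16,
    variant="fp16",
)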
scheduler/scheduler_config.json ADDED
@@ -0,0 +1,19 @@
+{
+  "_class_name": "HEURI_DDIMScheduler",
+  "_diffusers_version": "0.28.0",
+  "beta_end": 0.012,
+  "beta_schedule": "scaled_linear",
+  "beta_start": 0.00085,
+  "clip_sample": true,
+  "clip_sample_range": 1.0,
+  "dynamic_thresholding_ratio": 0.995,
+  "num_train_timesteps": 1000,
+  "prediction_type": "sample",
+  "rescale_betas_zero_snr": false,
+  "sample_max_value": 1.0,
+  "set_alpha_to_one": true,
+  "steps_offset": 0,
+  "thresholding": false,
+  "timestep_spacing": "leading",
+  "trained_betas": null
+}
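Note prediction_type is "sample": the network regresses the clean latent directly rather than the added noise. HEURI_DDIMScheduler itself is a custom class; as a rough stand-in for inspecting the schedule, the same config can seed a stock DDIMScheduler (a sketch, assuming a local clone of this repo):

from diffusers import DDIMScheduler

scheduler = DDIMScheduler.from_pretrained("./stable-normal-v0-1", subfolder="scheduler")
scheduler.set_timesteps(10)
print(scheduler.config.prediction_type, scheduler.timesteps[:3])  # "sample", leading-spaced steps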
text_encoder/config.json ADDED
@@ -0,0 +1,25 @@
+{
+  "_name_or_path": "weights/stable-normal-v0-1/text_encoder",
+  "architectures": [
+    "CLIPTextModel"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 0,
+  "dropout": 0.0,
+  "eos_token_id": 2,
+  "hidden_act": "gelu",
+  "hidden_size": 1024,
+  "initializer_factor": 1.0,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 77,
+  "model_type": "clip_text_model",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 23,
+  "pad_token_id": 1,
+  "projection_dim": 512,
+  "torch_dtype": "float16",
+  "transformers_version": "4.41.1",
+  "vocab_size": 49408
+}
text_encoder/model.fp16.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bc1827c465450322616f06dea41596eac7d493f4e95904dcb51f0fc745c4e13f
+size 680820392
text_encoder/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a0c8295cf55d93f69f78b72d14d780579bdeca29680dc2ba95b649bb802dd967
+size 1361674202
tokenizer/merges.txt ADDED
The diff for this file is too large to render.
tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+{
+  "bos_token": {
+    "content": "<|startoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "!",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer/tokenizer_config.json ADDED
@@ -0,0 +1,38 @@
+{
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "!",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "49406": {
+      "content": "<|startoftext|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "49407": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<|startoftext|>",
+  "clean_up_tokenization_spaces": true,
+  "do_lower_case": true,
+  "eos_token": "<|endoftext|>",
+  "errors": "replace",
+  "model_max_length": 77,
+  "pad_token": "!",
+  "tokenizer_class": "CLIPTokenizer",
+  "unk_token": "<|endoftext|>"
+}
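One quirk worth noting: pad_token is "!" (token id 0), the usual SD 2.x CLIP convention, rather than a dedicated pad token. A quick check sketch, assuming a local clone of this repo:

from transformers import CLIPTokenizer

tok = CLIPTokenizer.from_pretrained("./stable-normal-v0-1", subfolder="tokenizer")
ids = tok("", padding="max_length", max_length=77).input_ids
print(ids[:3])  # [49406, 49407, 0] -> <|startoftext|>, <|endoftext|>, then "!" padding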
tokenizer/vocab.json ADDED
The diff for this file is too large to render.
unet/config.json ADDED
@@ -0,0 +1,73 @@
+{
+  "_class_name": "UNet2DConditionModel",
+  "_diffusers_version": "0.28.0",
+  "_name_or_path": "weights/stable-normal-v0-1/unet",
+  "act_fn": "silu",
+  "addition_embed_type": null,
+  "addition_embed_type_num_heads": 64,
+  "addition_time_embed_dim": null,
+  "attention_head_dim": [
+    5,
+    10,
+    20,
+    20
+  ],
+  "attention_type": "default",
+  "block_out_channels": [
+    320,
+    640,
+    1280,
+    1280
+  ],
+  "center_input_sample": false,
+  "class_embed_type": null,
+  "class_embeddings_concat": false,
+  "conv_in_kernel": 3,
+  "conv_out_kernel": 3,
+  "cross_attention_dim": 1024,
+  "cross_attention_norm": null,
+  "down_block_types": [
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "DownBlock2D"
+  ],
+  "downsample_padding": 1,
+  "dropout": 0.0,
+  "dual_cross_attention": false,
+  "encoder_hid_dim": null,
+  "encoder_hid_dim_type": null,
+  "flip_sin_to_cos": true,
+  "freq_shift": 0,
+  "in_channels": 4,
+  "layers_per_block": 2,
+  "mid_block_only_cross_attention": null,
+  "mid_block_scale_factor": 1,
+  "mid_block_type": "UNetMidBlock2DCrossAttn",
+  "norm_eps": 1e-05,
+  "norm_num_groups": 32,
+  "num_attention_heads": null,
+  "num_class_embeds": null,
+  "only_cross_attention": false,
+  "out_channels": 4,
+  "projection_class_embeddings_input_dim": null,
+  "resnet_out_scale_factor": 1.0,
+  "resnet_skip_time_act": false,
+  "resnet_time_scale_shift": "default",
+  "reverse_transformer_layers_per_block": null,
+  "sample_size": 96,
+  "time_cond_proj_dim": null,
+  "time_embedding_act_fn": null,
+  "time_embedding_dim": null,
+  "time_embedding_type": "positional",
+  "timestep_post_act": null,
+  "transformer_layers_per_block": 1,
+  "up_block_types": [
+    "UpBlock2D",
+    "CrossAttnUpBlock2D",
+    "CrossAttnUpBlock2D",
+    "CrossAttnUpBlock2D"
+  ],
+  "upcast_attention": true,
+  "use_linear_projection": true
+}
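This is the stock SD 2.1 UNet backbone; sample_size 96 corresponds to 768x768-pixel inputs through the VAE's 8x spatial downsampling. A loading sketch, assuming a local clone of this repo:

from diffusers import UNet2DConditionModel

unet = UNet2DConditionModel.from_pretrained("./stable-normal-v0-1", subfolder="unet")
print(unet.config.sample_size * 8)  # 768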
unet/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9f2ece8e991c8bee90c452b92c348aec4ff99a4b24b80ed226809ac114707c3e
+size 3463925558
unet/diffusion_pytorch_model.fp16.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6138d8250972d1e5b8a13ea8490bba91400ed13c765f41ba07bd639f496a9787
+size 1731904736
vae/config.json ADDED
@@ -0,0 +1,34 @@
+{
+  "_class_name": "AutoencoderKL",
+  "_diffusers_version": "0.28.0",
+  "_name_or_path": "weights/stable-normal-v0-1/vae",
+  "act_fn": "silu",
+  "block_out_channels": [
+    128,
+    256,
+    512,
+    512
+  ],
+  "down_block_types": [
+    "DownEncoderBlock2D",
+    "DownEncoderBlock2D",
+    "DownEncoderBlock2D",
+    "DownEncoderBlock2D"
+  ],
+  "force_upcast": true,
+  "in_channels": 3,
+  "latent_channels": 4,
+  "latents_mean": null,
+  "latents_std": null,
+  "layers_per_block": 2,
+  "norm_num_groups": 32,
+  "out_channels": 3,
+  "sample_size": 768,
+  "scaling_factor": 0.18215,
+  "up_block_types": [
+    "UpDecoderBlock2D",
+    "UpDecoderBlock2D",
+    "UpDecoderBlock2D",
+    "UpDecoderBlock2D"
+  ]
+}
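The VAE is the standard SD autoencoder; encoded latents are multiplied by scaling_factor (0.18215) and divided by it again before decoding. A round-trip shape sketch, assuming a local clone of this repo:

import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("./stable-normal-v0-1", subfolder="vae")
image = torch.randn(1, 3, 768, 768)
latents = vae.encode(image).latent_dist.sample() * vae.config.scaling_factor
decoded = vae.decode(latents / vae.config.scaling_factor).sample
print(latents.shape, decoded.shape)  # (1, 4, 96, 96), (1, 3, 768, 768)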
vae/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:66d3d118c9806bd5dfc3da5c75f95e71dc57510fd7a3d34b7e8f510fcba3243e
+size 334712578
vae/diffusion_pytorch_model.fp16.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3e4c08995484ee61270175e9e7a072b66a6e4eeb5f0c266667fe1f45b90daf9a
+size 167335342