from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.utils.checkpoint
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.models.attention_processor import AttentionProcessor
from diffusers.models.embeddings import TimestepEmbedding, Timesteps
from diffusers.models.modeling_utils import ModelMixin
from diffusers.utils import BaseOutput, logging

from memo.models.resnet import InflatedConv3d, InflatedGroupNorm
from memo.models.unet_3d_blocks import (
    UNetMidBlock3DCrossAttn,
    get_down_block,
    get_up_block,
)


logger = logging.get_logger(__name__)
@dataclass
class UNet3DConditionOutput(BaseOutput):
    """Output of `UNet3DConditionModel.forward`: the predicted sample tensor."""

    sample: torch.FloatTensor
class UNet3DConditionModel(ModelMixin, ConfigMixin):
    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        sample_size: Optional[int] = None,
        in_channels: int = 8,
        out_channels: int = 8,
        flip_sin_to_cos: bool = True,
        freq_shift: int = 0,
        down_block_types: Tuple[str] = (
            "CrossAttnDownBlock3D",
            "CrossAttnDownBlock3D",
            "CrossAttnDownBlock3D",
            "DownBlock3D",
        ),
        mid_block_type: str = "UNetMidBlock3DCrossAttn",
        up_block_types: Tuple[str] = (
            "UpBlock3D",
            "CrossAttnUpBlock3D",
            "CrossAttnUpBlock3D",
            "CrossAttnUpBlock3D",
        ),
        only_cross_attention: Union[bool, Tuple[bool]] = False,
        block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
        layers_per_block: int = 2,
        downsample_padding: int = 1,
        mid_block_scale_factor: float = 1,
        act_fn: str = "silu",
        norm_num_groups: int = 32,
        norm_eps: float = 1e-5,
        cross_attention_dim: int = 1280,
        attention_head_dim: Union[int, Tuple[int]] = 8,
        dual_cross_attention: bool = False,
        use_linear_projection: bool = False,
        class_embed_type: Optional[str] = None,
        num_class_embeds: Optional[int] = None,
        upcast_attention: bool = False,
        resnet_time_scale_shift: str = "default",
        use_inflated_groupnorm: bool = False,
        # Temporal (motion-module) options.
        motion_module_resolutions: Tuple[int] = (1, 2, 4, 8),
        motion_module_kwargs: Optional[dict] = None,
        unet_use_cross_frame_attention: Optional[bool] = None,
        unet_use_temporal_attention: Optional[bool] = None,
        # Audio-conditioning options.
        audio_attention_dim: int = 768,
        emo_drop_rate: float = 0.3,
    ):
        super().__init__()

        self.sample_size = sample_size
        time_embed_dim = block_out_channels[0] * 4
        # Input convolution (a 2D convolution inflated over the frame axis).
        self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))

        # Time embedding: sinusoidal projection followed by a small MLP.
        self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
        timestep_input_dim = block_out_channels[0]
        self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)

        # Optional class embedding, added to the time embedding in `forward`.
        if class_embed_type is None and num_class_embeds is not None:
            self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
        elif class_embed_type == "timestep":
            self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
        elif class_embed_type == "identity":
            self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
        else:
            self.class_embedding = None

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        # Broadcast per-block settings when a single value is given.
        if isinstance(only_cross_attention, bool):
            only_cross_attention = [only_cross_attention] * len(down_block_types)

        if isinstance(attention_head_dim, int):
            attention_head_dim = (attention_head_dim,) * len(down_block_types)
        # Down blocks. `res` is the downsampling factor at depth `i` and decides whether
        # a motion module is inserted at that resolution.
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            res = 2**i
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=time_embed_dim,
                add_downsample=not is_final_block,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                cross_attention_dim=cross_attention_dim,
                attn_num_head_channels=attention_head_dim[i],
                downsample_padding=downsample_padding,
                dual_cross_attention=dual_cross_attention,
                use_linear_projection=use_linear_projection,
                only_cross_attention=only_cross_attention[i],
                upcast_attention=upcast_attention,
                resnet_time_scale_shift=resnet_time_scale_shift,
                unet_use_cross_frame_attention=unet_use_cross_frame_attention,
                unet_use_temporal_attention=unet_use_temporal_attention,
                use_inflated_groupnorm=use_inflated_groupnorm,
                use_motion_module=res in motion_module_resolutions,
                motion_module_kwargs=motion_module_kwargs,
                audio_attention_dim=audio_attention_dim,
                depth=i,
                emo_drop_rate=emo_drop_rate,
            )
            self.down_blocks.append(down_block)
        # Mid block.
        if mid_block_type == "UNetMidBlock3DCrossAttn":
            self.mid_block = UNetMidBlock3DCrossAttn(
                in_channels=block_out_channels[-1],
                temb_channels=time_embed_dim,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                output_scale_factor=mid_block_scale_factor,
                resnet_time_scale_shift=resnet_time_scale_shift,
                cross_attention_dim=cross_attention_dim,
                attn_num_head_channels=attention_head_dim[-1],
                resnet_groups=norm_num_groups,
                dual_cross_attention=dual_cross_attention,
                use_linear_projection=use_linear_projection,
                upcast_attention=upcast_attention,
                unet_use_cross_frame_attention=unet_use_cross_frame_attention,
                unet_use_temporal_attention=unet_use_temporal_attention,
                use_inflated_groupnorm=use_inflated_groupnorm,
                motion_module_kwargs=motion_module_kwargs,
                audio_attention_dim=audio_attention_dim,
                depth=3,
                emo_drop_rate=emo_drop_rate,
            )
        else:
            raise ValueError(f"Unknown mid_block_type: {mid_block_type}")
        # Count the upsamplers so `forward` can infer the overall up-scaling factor.
        self.num_upsamplers = 0

        # Up blocks (the down path mirrored in reverse order).
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_attention_head_dim = list(reversed(attention_head_dim))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            res = 2 ** (3 - i)
            is_final_block = i == len(block_out_channels) - 1

            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]

            # Add an upsampler to every block except the last one.
            if not is_final_block:
                add_upsample = True
                self.num_upsamplers += 1
            else:
                add_upsample = False

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block + 1,
                in_channels=input_channel,
                out_channels=output_channel,
                prev_output_channel=prev_output_channel,
                temb_channels=time_embed_dim,
                add_upsample=add_upsample,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                cross_attention_dim=cross_attention_dim,
                attn_num_head_channels=reversed_attention_head_dim[i],
                dual_cross_attention=dual_cross_attention,
                use_linear_projection=use_linear_projection,
                only_cross_attention=only_cross_attention[i],
                upcast_attention=upcast_attention,
                resnet_time_scale_shift=resnet_time_scale_shift,
                unet_use_cross_frame_attention=unet_use_cross_frame_attention,
                unet_use_temporal_attention=unet_use_temporal_attention,
                use_inflated_groupnorm=use_inflated_groupnorm,
                use_motion_module=res in motion_module_resolutions,
                motion_module_kwargs=motion_module_kwargs,
                audio_attention_dim=audio_attention_dim,
                depth=3 - i,
                emo_drop_rate=emo_drop_rate,
                is_final_block=is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel
        # Output layers.
        if use_inflated_groupnorm:
            self.conv_norm_out = InflatedGroupNorm(
                num_channels=block_out_channels[0],
                num_groups=norm_num_groups,
                eps=norm_eps,
            )
        else:
            self.conv_norm_out = nn.GroupNorm(
                num_channels=block_out_channels[0],
                num_groups=norm_num_groups,
                eps=norm_eps,
            )
        self.conv_act = nn.SiLU()
        self.conv_out = InflatedConv3d(block_out_channels[0], out_channels, kernel_size=3, padding=1)
    @property
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        r"""
        Returns:
            `dict` of attention processors: a dictionary containing all attention processors used in the model,
            indexed by their weight names. Temporal-transformer sub-modules are skipped.
        """
        processors = {}

        def fn_recursive_add_processors(
            name: str,
            module: torch.nn.Module,
            processors: Dict[str, AttentionProcessor],
        ):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                if "temporal_transformer" not in sub_name:
                    fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            if "temporal_transformer" not in name:
                fn_recursive_add_processors(name, module, processors)

        return processors
    def set_attention_slice(self, slice_size):
        r"""
        Enable sliced attention computation.

        When this option is enabled, the attention module splits the input tensor into slices to compute attention
        in several steps. This saves some memory in exchange for a small speed decrease.

        Args:
            slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
                When `"auto"`, halves the input to the attention heads, so attention is computed in two steps. If
                `"max"`, the maximum amount of memory is saved by running only one slice at a time. If a number is
                provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
                must be a multiple of `slice_size`.
        """
        sliceable_head_dims = []

        def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):
            if hasattr(module, "set_attention_slice"):
                sliceable_head_dims.append(module.sliceable_head_dim)

            for child in module.children():
                fn_recursive_retrieve_sliceable_dims(child)

        for module in self.children():
            fn_recursive_retrieve_sliceable_dims(module)

        num_sliceable_layers = len(sliceable_head_dims)

        if slice_size == "auto":
            # Halve the attention head dimension of every sliceable layer.
            slice_size = [dim // 2 for dim in sliceable_head_dims]
        elif slice_size == "max":
            # Make the smallest possible slices to maximize memory savings.
            slice_size = num_sliceable_layers * [1]

        slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size

        if len(slice_size) != len(sliceable_head_dims):
            raise ValueError(
                f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
                f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
            )

        for i, size in enumerate(slice_size):
            dim = sliceable_head_dims[i]
            if size is not None and size > dim:
                raise ValueError(f"size {size} has to be smaller or equal to {dim}.")

        # Recursively set the slice sizes; the list is reversed so that `pop()` hands each
        # module its own entry in traversal order.
        def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
            if hasattr(module, "set_attention_slice"):
                module.set_attention_slice(slice_size.pop())

            for child in module.children():
                fn_recursive_set_attention_slice(child, slice_size)

        reversed_slice_size = list(reversed(slice_size))
        for module in self.children():
            fn_recursive_set_attention_slice(module, reversed_slice_size)
    def _set_gradient_checkpointing(self, module, value=False):
        if hasattr(module, "gradient_checkpointing"):
            module.gradient_checkpointing = value
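    # Note: this hook is applied to every sub-module by diffusers' ModelMixin when
    # `enable_gradient_checkpointing()` / `disable_gradient_checkpointing()` is called
    # on the model.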
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        r"""
        Sets the attention processor to use to compute attention.

        Parameters:
            processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
                The instantiated processor class or a dictionary of processor classes that will be set as the processor
                for **all** `Attention` layers.

                If `processor` is a dict, the key needs to define the path to the corresponding cross attention
                processor. This is strongly recommended when setting trainable attention processors.
        """
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                if "temporal_transformer" not in sub_name:
                    fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            if "temporal_transformer" not in name:
                fn_recursive_attn_processor(name, module, processor)
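    # Usage sketch (not executed here): the dict returned by `attn_processors` can be
    # rebuilt with custom processors and passed back through `set_attn_processor`, e.g.
    #     unet.set_attn_processor({name: MyProcessor() for name in unet.attn_processors})
    # where `MyProcessor` is a hypothetical `AttentionProcessor` implementation.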
    def forward(
        self,
        sample: torch.FloatTensor,
        ref_features: dict,
        timestep: Union[torch.Tensor, float, int, list],
        encoder_hidden_states: torch.Tensor,
        audio_embedding: Optional[torch.Tensor] = None,
        audio_emotion: Optional[torch.Tensor] = None,
        class_labels: Optional[torch.Tensor] = None,
        mask_cond_fea: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
        mid_block_additional_residual: Optional[torch.Tensor] = None,
        uc_mask: Optional[torch.Tensor] = None,
        return_dict: bool = True,
        is_new_audio: bool = True,
        update_past_memory: bool = False,
    ) -> Union[UNet3DConditionOutput, Tuple]:
        r"""
        Denoise `sample` for one `timestep`, conditioned on reference features, the conditioning
        embeddings in `encoder_hidden_states`, and optional audio embeddings and emotion labels.
        `ref_features` is expected to hold per-block feature lists under the keys `"down"`,
        `"mid"`, and `"up"`. Returns a `UNet3DConditionOutput` unless `return_dict=False`,
        in which case a plain tuple is returned.
        """
        # Overall up-scaling factor of the UNet. Spatial sizes that are not a multiple of
        # it need the target upsample size forwarded explicitly to the up blocks.
        default_overall_up_factor = 2**self.num_upsamplers

        forward_upsample_size = False
        upsample_size = None

        if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):
            logger.info("Forward upsample size to force interpolation output size.")
            forward_upsample_size = True

        # Convert a binary attention mask into an additive attention bias.
        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
            attention_mask = attention_mask.unsqueeze(1)

        # Center the input sample if requested. `center_input_sample` is not a registered
        # config option of this class, so fall back to False when it is absent.
        if getattr(self.config, "center_input_sample", False):
            sample = 2 * sample - 1.0
        # Build the time embedding. `timestep` may be a list (one entry per frame chunk),
        # a Python scalar, or a tensor; scalars are broadcast over the batch dimension.
        timesteps = timestep
        if isinstance(timesteps, list):
            t_emb_list = []
            for timesteps in timestep:
                if not torch.is_tensor(timesteps):
                    # MPS does not support float64 / int64.
                    is_mps = sample.device.type == "mps"
                    if isinstance(timesteps, float):
                        dtype = torch.float32 if is_mps else torch.float64
                    else:
                        dtype = torch.int32 if is_mps else torch.int64
                    timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
                elif len(timesteps.shape) == 0:
                    timesteps = timesteps[None].to(sample.device)

                timesteps = timesteps.expand(sample.shape[0])
                t_emb = self.time_proj(timesteps)
                t_emb_list.append(t_emb)

            t_emb = torch.stack(t_emb_list, dim=1)
        else:
            if not torch.is_tensor(timesteps):
                # MPS does not support float64 / int64.
                is_mps = sample.device.type == "mps"
                if isinstance(timestep, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(sample.device)

            timesteps = timesteps.expand(sample.shape[0])
            t_emb = self.time_proj(timesteps)

        # `time_proj` always returns float32; cast to the model dtype for fp16/bf16 runs.
        t_emb = t_emb.to(dtype=self.dtype)
        emb = self.time_embedding(t_emb)
        if self.class_embedding is not None:
            if class_labels is None:
                raise ValueError("class_labels should be provided when num_class_embeds > 0")

            if self.config.class_embed_type == "timestep":
                class_labels = self.time_proj(class_labels)

            class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)
            emb = emb + class_emb
        # Pre-process: project the input latents and add the optional mask-condition features.
        sample = self.conv_in(sample)
        if mask_cond_fea is not None:
            sample = sample + mask_cond_fea

        # Down blocks. Cross-attention blocks also consume reference features, audio
        # embeddings, and emotion labels, and return an updated audio embedding.
        down_block_res_samples = (sample,)
        for i, downsample_block in enumerate(self.down_blocks):
            if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
                sample, res_samples, audio_embedding = downsample_block(
                    hidden_states=sample,
                    ref_feature_list=ref_features["down"][i],
                    temb=emb,
                    encoder_hidden_states=encoder_hidden_states,
                    attention_mask=attention_mask,
                    audio_embedding=audio_embedding,
                    emotion=audio_emotion,
                    uc_mask=uc_mask,
                    is_new_audio=is_new_audio,
                    update_past_memory=update_past_memory,
                )
            else:
                sample, res_samples = downsample_block(
                    hidden_states=sample,
                    ref_feature_list=ref_features["down"][i],
                    temb=emb,
                    encoder_hidden_states=encoder_hidden_states,
                    is_new_audio=is_new_audio,
                    update_past_memory=update_past_memory,
                )

            down_block_res_samples += res_samples

        # Add externally provided residuals (e.g. from a ControlNet-style module)
        # to the skip connections, when given.
        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()

            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample = down_block_res_sample + down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)

            down_block_res_samples = new_down_block_res_samples
        # Mid block.
        sample, audio_embedding = self.mid_block(
            sample,
            ref_feature_list=ref_features["mid"][0],
            temb=emb,
            encoder_hidden_states=encoder_hidden_states,
            attention_mask=attention_mask,
            audio_embedding=audio_embedding,
            emotion=audio_emotion,
            uc_mask=uc_mask,
            is_new_audio=is_new_audio,
            update_past_memory=update_past_memory,
        )

        if mid_block_additional_residual is not None:
            sample = sample + mid_block_additional_residual
        # Up blocks: consume the skip connections in reverse order.
        for i, upsample_block in enumerate(self.up_blocks):
            is_final_block = i == len(self.up_blocks) - 1

            res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
            down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]

            # If the input was not a multiple of the overall up factor, forward the target
            # upsample size taken from the matching skip connection.
            if not is_final_block and forward_upsample_size:
                upsample_size = down_block_res_samples[-1].shape[2:]

            if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention:
                sample, audio_embedding = upsample_block(
                    hidden_states=sample,
                    ref_feature_list=ref_features["up"][i],
                    temb=emb,
                    res_hidden_states_tuple=res_samples,
                    encoder_hidden_states=encoder_hidden_states,
                    upsample_size=upsample_size,
                    attention_mask=attention_mask,
                    audio_embedding=audio_embedding,
                    emotion=audio_emotion,
                    uc_mask=uc_mask,
                    is_new_audio=is_new_audio,
                    update_past_memory=update_past_memory,
                )
            else:
                sample = upsample_block(
                    hidden_states=sample,
                    ref_feature_list=ref_features["up"][i],
                    temb=emb,
                    res_hidden_states_tuple=res_samples,
                    upsample_size=upsample_size,
                    encoder_hidden_states=encoder_hidden_states,
                    is_new_audio=is_new_audio,
                    update_past_memory=update_past_memory,
                )
        # Post-process.
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        if not return_dict:
            return (sample,)

        return UNet3DConditionOutput(sample=sample)
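# ------------------------------------------------------------------------------------
# Minimal smoke-test sketch (an illustration, not part of the model). It only checks
# that the denoising UNet can be constructed with its default configuration, assuming
# the block factories in memo.models.unet_3d_blocks accept the default arguments used
# above. Building the `ref_features` dict for a real forward pass requires a separate
# reference net, which is out of scope here.
# ------------------------------------------------------------------------------------
if __name__ == "__main__":
    unet = UNet3DConditionModel()
    num_params = sum(p.numel() for p in unet.parameters())
    logger.info("UNet3DConditionModel built with %.1fM parameters", num_params / 1e6)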