import inspect
import math
from inspect import isfunction
from typing import Any, Callable, List, Optional, Union

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms.functional as TF
import xformers
import xformers.ops
from diffusers import AutoencoderKL, DiffusionPipeline
from diffusers.configuration_utils import ConfigMixin, FrozenDict
from diffusers.models.modeling_utils import ModelMixin
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import (
    deprecate,
    is_accelerate_available,
    is_accelerate_version,
    logging,
)
from diffusers.utils.torch_utils import randn_tensor
from einops import rearrange, repeat
from kiui.cam import orbit_camera
from transformers import (
    CLIPImageProcessor,
    CLIPTextModel,
    CLIPTokenizer,
    CLIPVisionModel,
)


def get_camera(
    num_frames,
    elevation=15,
    azimuth_start=0,
    azimuth_span=360,
    blender_coord=True,
    extra_view=False,
):
    """Build `num_frames` camera poses evenly spaced over `azimuth_span` degrees,
    returned as flattened row-major matrices, one pose per row."""
    angle_gap = azimuth_span / num_frames
    cameras = []
    for azimuth in np.arange(azimuth_start, azimuth_span + azimuth_start, angle_gap):
        pose = orbit_camera(-elevation, azimuth, radius=1)
        if blender_coord:
            # convert from OpenGL to Blender camera coordinates
            pose[2] *= -1
            pose[[1, 2]] = pose[[2, 1]]
        cameras.append(pose.flatten())

    if extra_view:
        # append an all-zero pose as a placeholder for the reference view
        cameras.append(np.zeros_like(cameras[0]))

    return torch.from_numpy(np.stack(cameras, axis=0)).float()
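
# Illustrative usage (a sketch, not part of the original module): with kiui's
# 4x4 orbit poses, each returned row is a flattened camera matrix.
#
#   cams = get_camera(4, elevation=15)                      # shape (4, 16)
#   cams_ip = get_camera(4, elevation=15, extra_view=True)  # shape (5, 16); last row is zeros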


def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):
    """
    Create sinusoidal timestep embeddings.
    :param timesteps: a 1-D Tensor of N indices, one per batch element.
                      These may be fractional.
    :param dim: the dimension of the output.
    :param max_period: controls the minimum frequency of the embeddings.
    :param repeat_only: if True, skip the sinusoidal embedding and simply
                        repeat the raw timesteps across `dim` channels.
    :return: an [N x dim] Tensor of positional embeddings.
    """
    if not repeat_only:
        half = dim // 2
        freqs = torch.exp(
            -math.log(max_period)
            * torch.arange(start=0, end=half, dtype=torch.float32)
            / half
        ).to(device=timesteps.device)
        args = timesteps[:, None] * freqs[None]
        embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
        if dim % 2:
            # pad with a zero channel when dim is odd
            embedding = torch.cat(
                [embedding, torch.zeros_like(embedding[:, :1])], dim=-1
            )
    else:
        embedding = repeat(timesteps, "b -> b d", d=dim)

    return embedding
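
# Illustrative example (assumed values, not from the original source):
#
#   t = torch.tensor([10.0, 500.0])
#   emb = timestep_embedding(t, 320)  # shape (2, 320): cosine terms first, then sine terms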


def zero_module(module):
    """
    Zero out the parameters of a module and return it.
    """
    for p in module.parameters():
        p.detach().zero_()
    return module


def conv_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D convolution module.
    """
    if dims == 1:
        return nn.Conv1d(*args, **kwargs)
    elif dims == 2:
        return nn.Conv2d(*args, **kwargs)
    elif dims == 3:
        return nn.Conv3d(*args, **kwargs)
    raise ValueError(f"unsupported dimensions: {dims}")


def avg_pool_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D average pooling module.
    """
    if dims == 1:
        return nn.AvgPool1d(*args, **kwargs)
    elif dims == 2:
        return nn.AvgPool2d(*args, **kwargs)
    elif dims == 3:
        return nn.AvgPool3d(*args, **kwargs)
    raise ValueError(f"unsupported dimensions: {dims}")


def default(val, d):
    """Return `val` if it is not None, else `d` (calling it first if it is a function)."""
    if val is not None:
        return val
    return d() if isfunction(d) else d
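
# Illustrative behavior (a sketch): the fallback is only evaluated when needed.
#
#   default(None, 3)                      # -> 3
#   default(5, lambda: expensive_init())  # -> 5; the lambda is never called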


class GEGLU(nn.Module):
    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def forward(self, x):
        x, gate = self.proj(x).chunk(2, dim=-1)
        return x * F.gelu(gate)


class FeedForward(nn.Module):
    def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.0):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = default(dim_out, dim)
        project_in = (
            nn.Sequential(nn.Linear(dim, inner_dim), nn.GELU())
            if not glu
            else GEGLU(dim, inner_dim)
        )

        self.net = nn.Sequential(
            project_in, nn.Dropout(dropout), nn.Linear(inner_dim, dim_out)
        )

    def forward(self, x):
        return self.net(x)


class MemoryEfficientCrossAttention(nn.Module):
    def __init__(
        self,
        query_dim,
        context_dim=None,
        heads=8,
        dim_head=64,
        dropout=0.0,
        ip_dim=0,
        ip_weight=1,
    ):
        super().__init__()

        inner_dim = dim_head * heads
        context_dim = default(context_dim, query_dim)

        self.heads = heads
        self.dim_head = dim_head

        self.ip_dim = ip_dim
        self.ip_weight = ip_weight

        if self.ip_dim > 0:
            # separate key/value projections for the image-prompt (IP) tokens
            self.to_k_ip = nn.Linear(context_dim, inner_dim, bias=False)
            self.to_v_ip = nn.Linear(context_dim, inner_dim, bias=False)

        self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
        self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
        self.to_v = nn.Linear(context_dim, inner_dim, bias=False)

        self.to_out = nn.Sequential(
            nn.Linear(inner_dim, query_dim), nn.Dropout(dropout)
        )
        self.attention_op: Optional[Any] = None

    def forward(self, x, context=None):
        q = self.to_q(x)
        context = default(context, x)

        if self.ip_dim > 0:
            # the last ip_dim context tokens are image-prompt tokens
            token_len = context.shape[1]
            context_ip = context[:, -self.ip_dim :, :]
            k_ip = self.to_k_ip(context_ip)
            v_ip = self.to_v_ip(context_ip)
            context = context[:, : (token_len - self.ip_dim), :]

        k = self.to_k(context)
        v = self.to_v(context)

        b, _, _ = q.shape
        q, k, v = map(
            lambda t: t.unsqueeze(3)
            .reshape(b, t.shape[1], self.heads, self.dim_head)
            .permute(0, 2, 1, 3)
            .reshape(b * self.heads, t.shape[1], self.dim_head)
            .contiguous(),
            (q, k, v),
        )

        out = xformers.ops.memory_efficient_attention(
            q, k, v, attn_bias=None, op=self.attention_op
        )

        if self.ip_dim > 0:
            k_ip, v_ip = map(
                lambda t: t.unsqueeze(3)
                .reshape(b, t.shape[1], self.heads, self.dim_head)
                .permute(0, 2, 1, 3)
                .reshape(b * self.heads, t.shape[1], self.dim_head)
                .contiguous(),
                (k_ip, v_ip),
            )

            # attend to the image-prompt tokens and blend the result in
            out_ip = xformers.ops.memory_efficient_attention(
                q, k_ip, v_ip, attn_bias=None, op=self.attention_op
            )
            out = out + self.ip_weight * out_ip

        out = (
            out.unsqueeze(0)
            .reshape(b, self.heads, out.shape[1], self.dim_head)
            .permute(0, 2, 1, 3)
            .reshape(b, out.shape[1], self.heads * self.dim_head)
        )
        return self.to_out(out)
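
# Context layout assumed by the forward pass above: when ip_dim > 0, the image-prompt
# tokens are concatenated after the text tokens, e.g. (illustrative only)
#
#   context = torch.cat([text_tokens, ip_tokens], dim=1)  # ip tokens fill the last ip_dim slots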


class BasicTransformerBlock3D(nn.Module):
    def __init__(
        self,
        dim,
        n_heads,
        d_head,
        context_dim,
        dropout=0.0,
        gated_ff=True,
        ip_dim=0,
        ip_weight=1,
    ):
        super().__init__()

        self.attn1 = MemoryEfficientCrossAttention(
            query_dim=dim,
            context_dim=None,  # self-attention
            heads=n_heads,
            dim_head=d_head,
            dropout=dropout,
        )
        self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff)
        self.attn2 = MemoryEfficientCrossAttention(
            query_dim=dim,
            context_dim=context_dim,
            heads=n_heads,
            dim_head=d_head,
            dropout=dropout,
            ip_dim=ip_dim,
            ip_weight=ip_weight,
        )
        self.norm1 = nn.LayerNorm(dim)
        self.norm2 = nn.LayerNorm(dim)
        self.norm3 = nn.LayerNorm(dim)

    def forward(self, x, context=None, num_frames=1):
        # joint self-attention across all frames (views) of a sample
        x = rearrange(x, "(b f) l c -> b (f l) c", f=num_frames).contiguous()
        x = self.attn1(self.norm1(x), context=None) + x
        x = rearrange(x, "b (f l) c -> (b f) l c", f=num_frames).contiguous()
        # per-frame cross-attention to the conditioning context
        x = self.attn2(self.norm2(x), context=context) + x
        x = self.ff(self.norm3(x)) + x
        return x
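
# Shape walk-through (illustrative): with batch b, frames f, and l tokens per frame,
# the self-attention step sees sequences of length f*l, so every view attends to
# every other view; the cross-attention step then runs on each view independently.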


class SpatialTransformer3D(nn.Module):
    def __init__(
        self,
        in_channels,
        n_heads,
        d_head,
        context_dim,
        depth=1,
        dropout=0.0,
        ip_dim=0,
        ip_weight=1,
    ):
        super().__init__()

        if not isinstance(context_dim, list):
            context_dim = [context_dim]

        self.in_channels = in_channels

        inner_dim = n_heads * d_head
        self.norm = nn.GroupNorm(
            num_groups=32, num_channels=in_channels, eps=1e-6, affine=True
        )
        self.proj_in = nn.Linear(in_channels, inner_dim)

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock3D(
                    inner_dim,
                    n_heads,
                    d_head,
                    context_dim=context_dim[d],
                    dropout=dropout,
                    ip_dim=ip_dim,
                    ip_weight=ip_weight,
                )
                for d in range(depth)
            ]
        )

        self.proj_out = zero_module(nn.Linear(in_channels, inner_dim))

    def forward(self, x, context=None, num_frames=1):
        if not isinstance(context, list):
            context = [context]
        b, c, h, w = x.shape
        x_in = x
        x = self.norm(x)
        x = rearrange(x, "b c h w -> b (h w) c").contiguous()
        x = self.proj_in(x)
        for i, block in enumerate(self.transformer_blocks):
            x = block(x, context=context[i], num_frames=num_frames)
        x = self.proj_out(x)
        x = rearrange(x, "b (h w) c -> b c h w", h=h, w=w).contiguous()

        return x + x_in


class PerceiverAttention(nn.Module):
    def __init__(self, *, dim, dim_head=64, heads=8):
        super().__init__()
        self.scale = dim_head**-0.5
        self.dim_head = dim_head
        self.heads = heads
        inner_dim = dim_head * heads

        self.norm1 = nn.LayerNorm(dim)
        self.norm2 = nn.LayerNorm(dim)

        self.to_q = nn.Linear(dim, inner_dim, bias=False)
        self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False)
        self.to_out = nn.Linear(inner_dim, dim, bias=False)

    def forward(self, x, latents):
        """
        Args:
            x (torch.Tensor): image features, shape (b, n1, D)
            latents (torch.Tensor): latent features, shape (b, n2, D)
        """
        x = self.norm1(x)
        latents = self.norm2(latents)

        b, h, _ = latents.shape  # h is the latent sequence length n2

        q = self.to_q(latents)
        # keys/values are computed from the image features and latents jointly
        kv_input = torch.cat((x, latents), dim=-2)
        k, v = self.to_kv(kv_input).chunk(2, dim=-1)

        q, k, v = map(
            lambda t: t.reshape(b, t.shape[1], self.heads, -1)
            .transpose(1, 2)
            .reshape(b, self.heads, t.shape[1], -1)
            .contiguous(),
            (q, k, v),
        )

        # split the scale between q and k for better numerical stability
        scale = 1 / math.sqrt(math.sqrt(self.dim_head))
        weight = (q * scale) @ (k * scale).transpose(-2, -1)
        weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)
        out = weight @ v

        out = out.permute(0, 2, 1, 3).reshape(b, h, -1)

        return self.to_out(out)


class Resampler(nn.Module):
    def __init__(
        self,
        dim=1024,
        depth=8,
        dim_head=64,
        heads=16,
        num_queries=8,
        embedding_dim=768,
        output_dim=1024,
        ff_mult=4,
    ):
        super().__init__()
        self.latents = nn.Parameter(torch.randn(1, num_queries, dim) / dim**0.5)
        self.proj_in = nn.Linear(embedding_dim, dim)
        self.proj_out = nn.Linear(dim, output_dim)
        self.norm_out = nn.LayerNorm(output_dim)

        self.layers = nn.ModuleList([])
        for _ in range(depth):
            self.layers.append(
                nn.ModuleList(
                    [
                        PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads),
                        nn.Sequential(
                            nn.LayerNorm(dim),
                            nn.Linear(dim, dim * ff_mult, bias=False),
                            nn.GELU(),
                            nn.Linear(dim * ff_mult, dim, bias=False),
                        ),
                    ]
                )
            )

    def forward(self, x):
        latents = self.latents.repeat(x.size(0), 1, 1)
        x = self.proj_in(x)
        for attn, ff in self.layers:
            latents = attn(x, latents) + latents
            latents = ff(latents) + latents

        latents = self.proj_out(latents)
        return self.norm_out(latents)
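
# Illustrative shapes (a sketch with assumed sizes): resample 257 CLIP vision tokens
# of width 1280 into 16 image-prompt tokens of width 1024.
#
#   r = Resampler(dim=1024, depth=4, heads=12, num_queries=16,
#                 embedding_dim=1280, output_dim=1024)
#   ip_tokens = r(torch.randn(2, 257, 1280))  # -> (2, 16, 1024)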


class CondSequential(nn.Sequential):
    """
    A sequential module that passes timestep embeddings to the children that
    support it as an extra input.
    """

    def forward(self, x, emb, context=None, num_frames=1):
        for layer in self:
            if isinstance(layer, ResBlock):
                x = layer(x, emb)
            elif isinstance(layer, SpatialTransformer3D):
                x = layer(x, context, num_frames=num_frames)
            else:
                x = layer(x)
        return x


class Upsample(nn.Module):
    """
    An upsampling layer with an optional convolution.
    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
        upsampling occurs in the inner-two dimensions.
    """

    def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        if use_conv:
            self.conv = conv_nd(
                dims, self.channels, self.out_channels, 3, padding=padding
            )

    def forward(self, x):
        assert x.shape[1] == self.channels
        if self.dims == 3:
            x = F.interpolate(
                x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
            )
        else:
            x = F.interpolate(x, scale_factor=2, mode="nearest")
        if self.use_conv:
            x = self.conv(x)
        return x


class Downsample(nn.Module):
    """
    A downsampling layer with an optional convolution.
    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
        downsampling occurs in the inner-two dimensions.
    """

    def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        stride = 2 if dims != 3 else (1, 2, 2)
        if use_conv:
            self.op = conv_nd(
                dims,
                self.channels,
                self.out_channels,
                3,
                stride=stride,
                padding=padding,
            )
        else:
            assert self.channels == self.out_channels
            self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)

    def forward(self, x):
        assert x.shape[1] == self.channels
        return self.op(x)


class ResBlock(nn.Module):
    """
    A residual block that can optionally change the number of channels.
    :param channels: the number of input channels.
    :param emb_channels: the number of timestep embedding channels.
    :param dropout: the rate of dropout.
    :param out_channels: if specified, the number of out channels.
    :param use_conv: if True and out_channels is specified, use a spatial
        convolution instead of a smaller 1x1 convolution to change the
        channels in the skip connection.
    :param dims: determines if the signal is 1D, 2D, or 3D.
    :param up: if True, use this block for upsampling.
    :param down: if True, use this block for downsampling.
    """

    def __init__(
        self,
        channels,
        emb_channels,
        dropout,
        out_channels=None,
        use_conv=False,
        use_scale_shift_norm=False,
        dims=2,
        up=False,
        down=False,
    ):
        super().__init__()
        self.channels = channels
        self.emb_channels = emb_channels
        self.dropout = dropout
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.use_scale_shift_norm = use_scale_shift_norm

        self.in_layers = nn.Sequential(
            nn.GroupNorm(32, channels),
            nn.SiLU(),
            conv_nd(dims, channels, self.out_channels, 3, padding=1),
        )

        self.updown = up or down

        if up:
            self.h_upd = Upsample(channels, False, dims)
            self.x_upd = Upsample(channels, False, dims)
        elif down:
            self.h_upd = Downsample(channels, False, dims)
            self.x_upd = Downsample(channels, False, dims)
        else:
            self.h_upd = self.x_upd = nn.Identity()

        self.emb_layers = nn.Sequential(
            nn.SiLU(),
            nn.Linear(
                emb_channels,
                2 * self.out_channels if use_scale_shift_norm else self.out_channels,
            ),
        )
        self.out_layers = nn.Sequential(
            nn.GroupNorm(32, self.out_channels),
            nn.SiLU(),
            nn.Dropout(p=dropout),
            zero_module(
                conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
            ),
        )

        if self.out_channels == channels:
            self.skip_connection = nn.Identity()
        elif use_conv:
            self.skip_connection = conv_nd(
                dims, channels, self.out_channels, 3, padding=1
            )
        else:
            self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)

    def forward(self, x, emb):
        if self.updown:
            in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
            h = in_rest(x)
            h = self.h_upd(h)
            x = self.x_upd(x)
            h = in_conv(h)
        else:
            h = self.in_layers(x)
        emb_out = self.emb_layers(emb).type(h.dtype)
        while len(emb_out.shape) < len(h.shape):
            emb_out = emb_out[..., None]
        if self.use_scale_shift_norm:
            # FiLM-style conditioning: the embedding predicts a scale and a shift
            out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
            scale, shift = torch.chunk(emb_out, 2, dim=1)
            h = out_norm(h) * (1 + scale) + shift
            h = out_rest(h)
        else:
            h = h + emb_out
            h = self.out_layers(h)
        return self.skip_connection(x) + h


class MultiViewUNetModel(ModelMixin, ConfigMixin):
    """
    The full multi-view UNet model with attention, timestep embedding and camera embedding.
    :param in_channels: channels in the input Tensor.
    :param model_channels: base channel count for the model.
    :param out_channels: channels in the output Tensor.
    :param num_res_blocks: number of residual blocks per downsample.
    :param attention_resolutions: a collection of downsample rates at which
        attention will take place. May be a set, list, or tuple.
        For example, if this contains 4, then at 4x downsampling, attention
        will be used.
    :param dropout: the dropout probability.
    :param channel_mult: channel multiplier for each level of the UNet.
    :param conv_resample: if True, use learned convolutions for upsampling and
        downsampling.
    :param dims: determines if the signal is 1D, 2D, or 3D.
    :param num_classes: if specified (as an int), then this model will be
        class-conditional with `num_classes` classes.
    :param num_heads: the number of attention heads in each attention layer.
    :param num_head_channels: if specified, ignore num_heads and instead use
        a fixed channel width per attention head.
    :param num_heads_upsample: works with num_heads to set a different number
        of heads for upsampling. Deprecated.
    :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
    :param resblock_updown: use residual blocks for up/downsampling.
    :param camera_dim: dimensionality of camera input.
    """

    def __init__(
        self,
        image_size,
        in_channels,
        model_channels,
        out_channels,
        num_res_blocks,
        attention_resolutions,
        dropout=0,
        channel_mult=(1, 2, 4, 8),
        conv_resample=True,
        dims=2,
        num_classes=None,
        num_heads=-1,
        num_head_channels=-1,
        num_heads_upsample=-1,
        use_scale_shift_norm=False,
        resblock_updown=False,
        transformer_depth=1,
        context_dim=None,
        n_embed=None,
        num_attention_blocks=None,
        adm_in_channels=None,
        camera_dim=None,
        ip_dim=0,
        ip_weight=1.0,
        **kwargs,
    ):
        super().__init__()
        assert context_dim is not None

        if num_heads_upsample == -1:
            num_heads_upsample = num_heads

        if num_heads == -1:
            assert (
                num_head_channels != -1
            ), "Either num_heads or num_head_channels has to be set"

        if num_head_channels == -1:
            assert (
                num_heads != -1
            ), "Either num_heads or num_head_channels has to be set"

        self.image_size = image_size
        self.in_channels = in_channels
        self.model_channels = model_channels
        self.out_channels = out_channels
        if isinstance(num_res_blocks, int):
            self.num_res_blocks = len(channel_mult) * [num_res_blocks]
        else:
            if len(num_res_blocks) != len(channel_mult):
                raise ValueError(
                    "provide num_res_blocks either as an int (globally constant) or "
                    "as a list/tuple (per-level) with the same length as channel_mult"
                )
            self.num_res_blocks = num_res_blocks

        if num_attention_blocks is not None:
            assert len(num_attention_blocks) == len(self.num_res_blocks)
            assert all(
                map(
                    lambda i: self.num_res_blocks[i] >= num_attention_blocks[i],
                    range(len(num_attention_blocks)),
                )
            )
            print(
                f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. "
                f"This option has LESS priority than attention_resolutions {attention_resolutions}, "
                f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, "
                f"attention will still not be set."
            )

        self.attention_resolutions = attention_resolutions
        self.dropout = dropout
        self.channel_mult = channel_mult
        self.conv_resample = conv_resample
        self.num_classes = num_classes
        self.num_heads = num_heads
        self.num_head_channels = num_head_channels
        self.num_heads_upsample = num_heads_upsample
        self.predict_codebook_ids = n_embed is not None

        self.ip_dim = ip_dim
        self.ip_weight = ip_weight

        if self.ip_dim > 0:
            # resample CLIP vision features into ip_dim image-prompt tokens
            self.image_embed = Resampler(
                dim=context_dim,
                depth=4,
                dim_head=64,
                heads=12,
                num_queries=ip_dim,
                embedding_dim=1280,
                output_dim=context_dim,
                ff_mult=4,
            )

        time_embed_dim = model_channels * 4
        self.time_embed = nn.Sequential(
            nn.Linear(model_channels, time_embed_dim),
            nn.SiLU(),
            nn.Linear(time_embed_dim, time_embed_dim),
        )

        if camera_dim is not None:
            time_embed_dim = model_channels * 4
            # embed the flattened camera matrix the same way as the timestep
            self.camera_embed = nn.Sequential(
                nn.Linear(camera_dim, time_embed_dim),
                nn.SiLU(),
                nn.Linear(time_embed_dim, time_embed_dim),
            )

        if self.num_classes is not None:
            if isinstance(self.num_classes, int):
                self.label_emb = nn.Embedding(self.num_classes, time_embed_dim)
            elif self.num_classes == "continuous":
                self.label_emb = nn.Linear(1, time_embed_dim)
            elif self.num_classes == "sequential":
                assert adm_in_channels is not None
                self.label_emb = nn.Sequential(
                    nn.Sequential(
                        nn.Linear(adm_in_channels, time_embed_dim),
                        nn.SiLU(),
                        nn.Linear(time_embed_dim, time_embed_dim),
                    )
                )
            else:
                raise ValueError()

        self.input_blocks = nn.ModuleList(
            [CondSequential(conv_nd(dims, in_channels, model_channels, 3, padding=1))]
        )
        self._feature_size = model_channels
        input_block_chans = [model_channels]
        ch = model_channels
        ds = 1
        for level, mult in enumerate(channel_mult):
            for nr in range(self.num_res_blocks[level]):
                layers: List[Any] = [
                    ResBlock(
                        ch,
                        time_embed_dim,
                        dropout,
                        out_channels=mult * model_channels,
                        dims=dims,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = mult * model_channels
                if ds in attention_resolutions:
                    if num_head_channels == -1:
                        dim_head = ch // num_heads
                    else:
                        num_heads = ch // num_head_channels
                        dim_head = num_head_channels

                    if num_attention_blocks is None or nr < num_attention_blocks[level]:
                        layers.append(
                            SpatialTransformer3D(
                                ch,
                                num_heads,
                                dim_head,
                                context_dim=context_dim,
                                depth=transformer_depth,
                                ip_dim=self.ip_dim,
                                ip_weight=self.ip_weight,
                            )
                        )
                self.input_blocks.append(CondSequential(*layers))
                self._feature_size += ch
                input_block_chans.append(ch)
            if level != len(channel_mult) - 1:
                out_ch = ch
                self.input_blocks.append(
                    CondSequential(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_scale_shift_norm=use_scale_shift_norm,
                            down=True,
                        )
                        if resblock_updown
                        else Downsample(
                            ch, conv_resample, dims=dims, out_channels=out_ch
                        )
                    )
                )
                ch = out_ch
                input_block_chans.append(ch)
                ds *= 2
                self._feature_size += ch

        if num_head_channels == -1:
            dim_head = ch // num_heads
        else:
            num_heads = ch // num_head_channels
            dim_head = num_head_channels

        self.middle_block = CondSequential(
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
            SpatialTransformer3D(
                ch,
                num_heads,
                dim_head,
                context_dim=context_dim,
                depth=transformer_depth,
                ip_dim=self.ip_dim,
                ip_weight=self.ip_weight,
            ),
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
        )
        self._feature_size += ch

        self.output_blocks = nn.ModuleList([])
        for level, mult in list(enumerate(channel_mult))[::-1]:
            for i in range(self.num_res_blocks[level] + 1):
                ich = input_block_chans.pop()
                layers = [
                    ResBlock(
                        ch + ich,
                        time_embed_dim,
                        dropout,
                        out_channels=model_channels * mult,
                        dims=dims,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = model_channels * mult
                if ds in attention_resolutions:
                    if num_head_channels == -1:
                        dim_head = ch // num_heads
                    else:
                        num_heads = ch // num_head_channels
                        dim_head = num_head_channels

                    if num_attention_blocks is None or i < num_attention_blocks[level]:
                        layers.append(
                            SpatialTransformer3D(
                                ch,
                                num_heads,
                                dim_head,
                                context_dim=context_dim,
                                depth=transformer_depth,
                                ip_dim=self.ip_dim,
                                ip_weight=self.ip_weight,
                            )
                        )
                if level and i == self.num_res_blocks[level]:
                    out_ch = ch
                    layers.append(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_scale_shift_norm=use_scale_shift_norm,
                            up=True,
                        )
                        if resblock_updown
                        else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
                    )
                    ds //= 2
                self.output_blocks.append(CondSequential(*layers))
                self._feature_size += ch

        self.out = nn.Sequential(
            nn.GroupNorm(32, ch),
            nn.SiLU(),
            zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),
        )
        if self.predict_codebook_ids:
            self.id_predictor = nn.Sequential(
                nn.GroupNorm(32, ch),
                conv_nd(dims, model_channels, n_embed, 1),
            )

    def forward(
        self,
        x,
        timesteps=None,
        context=None,
        y=None,
        camera=None,
        num_frames=1,
        ip=None,
        ip_img=None,
        **kwargs,
    ):
        """
        Apply the model to an input batch.
        :param x: an [(N x F) x C x ...] Tensor of inputs. F is the number of frames (views).
        :param timesteps: a 1-D batch of timesteps.
        :param context: conditioning plugged in via crossattn.
        :param y: an [N] Tensor of labels, if class-conditional.
        :param num_frames: an integer indicating the number of frames for tensor reshaping.
        :return: an [(N x F) x C x ...] Tensor of outputs. F is the number of frames (views).
        """
        assert (
            x.shape[0] % num_frames == 0
        ), "input batch size must be divisible by num_frames!"
        assert (y is not None) == (
            self.num_classes is not None
        ), "must specify y if and only if the model is class-conditional"

        hs = []

        t_emb = timestep_embedding(
            timesteps, self.model_channels, repeat_only=False
        ).to(x.dtype)

        emb = self.time_embed(t_emb)

        if self.num_classes is not None:
            assert y is not None
            assert y.shape[0] == x.shape[0]
            emb = emb + self.label_emb(y)

        if camera is not None:
            emb = emb + self.camera_embed(camera)

        if self.ip_dim > 0:
            # overwrite the last frame of each sample with the reference-image latent
            x[(num_frames - 1) :: num_frames, :, :, :] = ip_img
            ip_emb = self.image_embed(ip)
            # append the image-prompt tokens after the text tokens
            context = torch.cat((context, ip_emb), 1)

        h = x
        for module in self.input_blocks:
            h = module(h, emb, context, num_frames=num_frames)
            hs.append(h)
        h = self.middle_block(h, emb, context, num_frames=num_frames)
        for module in self.output_blocks:
            h = torch.cat([h, hs.pop()], dim=1)
            h = module(h, emb, context, num_frames=num_frames)
        h = h.type(x.dtype)
        if self.predict_codebook_ids:
            return self.id_predictor(h)
        else:
            return self.out(h)
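
# Batch layout assumed by forward (illustrative): frames of the same sample are
# contiguous, so with N samples and F frames the batch is ordered as
#
#   [s0_f0, s0_f1, ..., s0_f(F-1), s1_f0, ...]   # shape ((N*F), C, H, W)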


logger = logging.get_logger(__name__)


class LGMFullPipeline(DiffusionPipeline):
    _optional_components = ["feature_extractor", "image_encoder"]

    def __init__(
        self,
        vae: AutoencoderKL,
        unet: MultiViewUNetModel,
        tokenizer: CLIPTokenizer,
        text_encoder: CLIPTextModel,
        scheduler: DDIMScheduler,
        feature_extractor: CLIPImageProcessor,
        image_encoder: CLIPVisionModel,
        lgm,
        requires_safety_checker: bool = False,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly, as leaving `steps_offset` might lead to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate(
                "steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False
            )
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `clip_sample`. `clip_sample` should be set to False in the configuration file. Please make"
                " sure to update the config accordingly, as not setting `clip_sample` in the config might lead"
                " to incorrect results in future versions. If you have downloaded this checkpoint from the"
                " Hugging Face Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate(
                "clip_sample not set", "1.0.0", deprecation_message, standard_warn=False
            )
            new_config = dict(scheduler.config)
            new_config["clip_sample"] = False
            scheduler._internal_dict = FrozenDict(new_config)

        self.imagenet_default_mean = (0.485, 0.456, 0.406)
        self.imagenet_default_std = (0.229, 0.224, 0.225)

        # the LGM reconstructor runs in fp16 on the GPU
        lgm = lgm.half().cuda()

        self.register_modules(
            vae=vae,
            unet=unet,
            scheduler=scheduler,
            tokenizer=tokenizer,
            text_encoder=text_encoder,
            feature_extractor=feature_extractor,
            image_encoder=image_encoder,
            lgm=lgm,
        )
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
        self.register_to_config(requires_safety_checker=requires_safety_checker)

    def save_ply(self, gaussians, path):
        self.lgm.gs.save_ply(gaussians, path)

    def enable_vae_slicing(self):
        r"""
        Enable sliced VAE decoding.

        When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several
        steps. This is useful to save some memory and allow larger batch sizes.
        """
        self.vae.enable_slicing()

    def disable_vae_slicing(self):
        r"""
        Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to
        computing decoding in one step.
        """
        self.vae.disable_slicing()

    def enable_vae_tiling(self):
        r"""
        Enable tiled VAE decoding.

        When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding
        in several steps. This is useful to save a large amount of memory and to allow the processing of larger
        images.
        """
        self.vae.enable_tiling()

    def disable_vae_tiling(self):
        r"""
        Disable tiled VAE decoding. If `enable_vae_tiling` was previously invoked, this method will go back to
        computing decoding in one step.
        """
        self.vae.disable_tiling()

    def enable_sequential_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
        text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
        `torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward` method called.
        Note that offloading happens on a submodule basis. Memory savings are higher than with
        `enable_model_cpu_offload`, but performance is lower.
        """
        if is_accelerate_available() and is_accelerate_version(">=", "0.14.0"):
            from accelerate import cpu_offload
        else:
            raise ImportError(
                "`enable_sequential_cpu_offload` requires `accelerate v0.14.0` or higher"
            )

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
            cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
        to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
        method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
        `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
        """
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError(
                "`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher."
            )

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()

        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:
            _, hook = cpu_offload_with_hook(
                cpu_offloaded_model, device, prev_module_hook=hook
            )

        self.final_offload_hook = hook

    @property
    def _execution_device(self):
        r"""
        Returns the device on which the pipeline's models will be executed. After calling
        `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
        hooks.
        """
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    def _encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance: bool,
        negative_prompt=None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`):
                prompt to be encoded
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance
                (i.e., ignored if `guidance_scale` is less than `1`).
        """
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(
                f"`prompt` should be either a string or a list of strings, but got {type(prompt)}."
            )

        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(
            prompt, padding="longest", return_tensors="pt"
        ).input_ids

        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
            text_input_ids, untruncated_ids
        ):
            removed_text = self.tokenizer.batch_decode(
                untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
            )
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )

        if (
            hasattr(self.text_encoder.config, "use_attention_mask")
            and self.text_encoder.config.use_attention_mask
        ):
            attention_mask = text_inputs.attention_mask.to(device)
        else:
            attention_mask = None

        prompt_embeds = self.text_encoder(
            text_input_ids.to(device),
            attention_mask=attention_mask,
        )
        prompt_embeds = prompt_embeds[0]

        prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)

        bs_embed, seq_len, _ = prompt_embeds.shape

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(
            bs_embed * num_images_per_prompt, seq_len, -1
        )

        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = prompt_embeds.shape[1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )

            if (
                hasattr(self.text_encoder.config, "use_attention_mask")
                and self.text_encoder.config.use_attention_mask
            ):
                attention_mask = uncond_input.attention_mask.to(device)
            else:
                attention_mask = None

            negative_prompt_embeds = self.text_encoder(
                uncond_input.input_ids.to(device),
                attention_mask=attention_mask,
            )
            negative_prompt_embeds = negative_prompt_embeds[0]

            # duplicate unconditional embeddings for each generation per prompt
            seq_len = negative_prompt_embeds.shape[1]

            negative_prompt_embeds = negative_prompt_embeds.to(
                dtype=self.text_encoder.dtype, device=device
            )

            negative_prompt_embeds = negative_prompt_embeds.repeat(
                1, num_images_per_prompt, 1
            )
            negative_prompt_embeds = negative_prompt_embeds.view(
                batch_size * num_images_per_prompt, seq_len, -1
            )

            # for classifier-free guidance, concatenate the unconditional and text
            # embeddings into a single batch to avoid two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds

    def decode_latents(self, latents):
        latents = 1 / self.vae.config.scaling_factor * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        # move to CPU and convert to HWC float32 numpy
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        return image

    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers
        # have the same signature; `eta` corresponds to the DDIM parameter and
        # is ignored by other schedulers
        accepts_eta = "eta" in set(
            inspect.signature(self.scheduler.step).parameters.keys()
        )
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check whether the scheduler accepts a generator
        accepts_generator = "generator" in set(
            inspect.signature(self.scheduler.step).parameters.keys()
        )
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    def prepare_latents(
        self,
        batch_size,
        num_channels_latents,
        height,
        width,
        dtype,
        device,
        generator,
        latents=None,
    ):
        shape = (
            batch_size,
            num_channels_latents,
            height // self.vae_scale_factor,
            width // self.vae_scale_factor,
        )
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        if latents is None:
            latents = randn_tensor(
                shape, generator=generator, device=device, dtype=dtype
            )
        else:
            latents = latents.to(device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents

    def encode_image(self, image, device, num_images_per_prompt):
        dtype = next(self.image_encoder.parameters()).dtype

        if image.dtype == np.float32:
            image = (image * 255).astype(np.uint8)

        image = self.feature_extractor(image, return_tensors="pt").pixel_values
        image = image.to(device=device, dtype=dtype)

        # the penultimate hidden states serve as the image-prompt features
        image_embeds = self.image_encoder(
            image, output_hidden_states=True
        ).hidden_states[-2]
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        # zeros act as the unconditional image embedding for classifier-free guidance
        return torch.zeros_like(image_embeds), image_embeds

    def encode_image_latents(self, image, device, num_images_per_prompt):
        dtype = next(self.image_encoder.parameters()).dtype

        image = (
            torch.from_numpy(image).unsqueeze(0).permute(0, 3, 1, 2).to(device=device)
        )
        image = 2 * image - 1  # [0, 1] -> [-1, 1]
        image = F.interpolate(image, (256, 256), mode="bilinear", align_corners=False)
        image = image.to(dtype=dtype)

        posterior = self.vae.encode(image).latent_dist
        latents = posterior.sample() * self.vae.config.scaling_factor
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)

        # zeros act as the unconditional image latent for classifier-free guidance
        return torch.zeros_like(latents), latents

    @torch.no_grad()
    def __call__(
        self,
        prompt: str = "",
        image: Optional[np.ndarray] = None,
        height: int = 256,
        width: int = 256,
        elevation: float = 0,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.0,
        negative_prompt: str = "",
        num_images_per_prompt: int = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "numpy",
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        num_frames: int = 4,
        device=torch.device("cuda:0"),
    ):
        self.unet = self.unet.to(device=device)
        self.vae = self.vae.to(device=device)
        self.text_encoder = self.text_encoder.to(device=device)

        # `guidance_scale` is the classifier-free guidance weight;
        # guidance_scale <= 1 disables it
        do_classifier_free_guidance = guidance_scale > 1.0

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        # encode the reference image, if provided (image-conditioned generation)
        if image is not None:
            assert isinstance(image, np.ndarray) and image.dtype == np.float32
            self.image_encoder = self.image_encoder.to(device=device)
            image_embeds_neg, image_embeds_pos = self.encode_image(
                image, device, num_images_per_prompt
            )
            image_latents_neg, image_latents_pos = self.encode_image_latents(
                image, device, num_images_per_prompt
            )

        _prompt_embeds = self._encode_prompt(
            prompt=prompt,
            device=device,
            num_images_per_prompt=num_images_per_prompt,
            do_classifier_free_guidance=do_classifier_free_guidance,
            negative_prompt=negative_prompt,
        )
        prompt_embeds_neg, prompt_embeds_pos = _prompt_embeds.chunk(2)

        # prepare latents; an extra frame is added for the reference view when
        # image conditioning is used
        actual_num_frames = num_frames if image is None else num_frames + 1
        latents: torch.Tensor = self.prepare_latents(
            actual_num_frames * num_images_per_prompt,
            4,
            height,
            width,
            prompt_embeds_pos.dtype,
            device,
            generator,
            None,
        )

        # camera poses for the sampled views (plus a zero pose for the reference view)
        camera = get_camera(
            num_frames, elevation=elevation, extra_view=(image is not None)
        ).to(dtype=latents.dtype, device=device)
        camera = camera.repeat_interleave(num_images_per_prompt, dim=0)

        # prepare extra step kwargs
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier-free guidance
                multiplier = 2 if do_classifier_free_guidance else 1
                latent_model_input = torch.cat([latents] * multiplier)
                latent_model_input = self.scheduler.scale_model_input(
                    latent_model_input, t
                )

                unet_inputs = {
                    "x": latent_model_input,
                    "timesteps": torch.tensor(
                        [t] * actual_num_frames * multiplier,
                        dtype=latent_model_input.dtype,
                        device=device,
                    ),
                    "context": torch.cat(
                        [prompt_embeds_neg] * actual_num_frames
                        + [prompt_embeds_pos] * actual_num_frames
                    ),
                    "num_frames": actual_num_frames,
                    "camera": torch.cat([camera] * multiplier),
                }

                if image is not None:
                    unet_inputs["ip"] = torch.cat(
                        [image_embeds_neg] * actual_num_frames
                        + [image_embeds_pos] * actual_num_frames
                    )
                    unet_inputs["ip_img"] = torch.cat(
                        [image_latents_neg] + [image_latents_pos]
                    )

                # predict the noise residual
                noise_pred = self.unet.forward(**unet_inputs)

                # perform classifier-free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (
                        noise_pred_text - noise_pred_uncond
                    )

                # compute the previous noisy sample x_t -> x_t-1
                latents: torch.Tensor = self.scheduler.step(
                    noise_pred, t, latents, **extra_step_kwargs, return_dict=False
                )[0]

                # update the progress bar and call the callback, if provided
                if i == len(timesteps) - 1 or (
                    (i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0
                ):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        callback(i, t, latents)

        # decode the latents into images
        if output_type == "latent":
            image = latents
        elif output_type == "pil":
            image = self.decode_latents(latents)
            image = self.numpy_to_pil(image)
        else:  # numpy
            image = self.decode_latents(latents)

        # offload the last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        # reorder the generated views into the order expected by LGM and
        # normalize them with ImageNet statistics
        images = np.stack([image[1], image[2], image[3], image[0]], axis=0)
        images = torch.from_numpy(images).permute(0, 3, 1, 2).float().cuda()
        images = F.interpolate(
            images,
            size=(256, 256),
            mode="bilinear",
            align_corners=False,
        )
        images = TF.normalize(
            images, self.imagenet_default_mean, self.imagenet_default_std
        )

        # append per-pixel ray embeddings and reconstruct Gaussians with LGM
        rays_embeddings = self.lgm.prepare_default_rays("cuda", elevation=0)
        images = torch.cat([images, rays_embeddings], dim=1).unsqueeze(0)
        images = images.half().cuda()

        result = self.lgm(images)
        return result
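

# Minimal usage sketch (hypothetical checkpoint id and module layout; the hosting
# repo must provide a custom-pipeline config plus a matching `lgm` component):
#
#   pipe = LGMFullPipeline.from_pretrained(
#       "your-org/your-lgm-checkpoint",
#       torch_dtype=torch.float16,
#       trust_remote_code=True,
#   ).to("cuda")
#   gaussians = pipe("a plush toy dinosaur", num_inference_steps=30, guidance_scale=7.0)
#   pipe.save_ply(gaussians, "out.ply")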