from ..modeling_videobase import VideoBaseAE
from ..modules import Normalize
from ..modules.ops import nonlinearity
from typing import List, Tuple
import torch.nn as nn

from ..utils.module_utils import resolve_str_to_obj, Module
from ..utils.distrib_utils import DiagonalGaussianDistribution
from ..utils.scheduler_utils import cosine_scheduler
from ...utils.utils import custom_to_video

import torch
from diffusers.configuration_utils import register_to_config
from copy import deepcopy
import os
import glob

import numpy as np
from ...eval.cal_psnr import calculate_psnr
from decord import VideoReader, cpu
from pytorchvideo.transforms import ShortSideScale
from torchvision.io import read_video
from torchvision.transforms import Lambda, Compose
from torchvision.transforms._transforms_video import CenterCropVideo
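

# Multi-scale encoder built from the configurable blocks above: each level applies
# `num_res_blocks` residual blocks (optionally with attention) followed by a spatial
# downsample. Besides the bottleneck feature it returns the pre-downsample activations
# of every level, which the Decoder below consumes as skip connections.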
class Encoder(nn.Module):
    def __init__(
        self,
        hidden_size: int,
        hidden_size_mult: Tuple[int] = (1, 2, 4, 4),
        attn_resolutions: Tuple[int] = (16,),
        conv_in: Module = "Conv2d",
        attention: Module = "AttnBlock",
        resnet_blocks: Tuple[Module] = (
            "ResnetBlock2D",
            "ResnetBlock2D",
            "ResnetBlock2D",
            "ResnetBlock3D",
        ),
        spatial_downsample: Tuple[Module] = (
            "Downsample",
            "Downsample",
            "Downsample",
            "",
        ),
        dropout: float = 0.0,
        resolution: int = 256,
        num_res_blocks: int = 2,
    ) -> None:
        super().__init__()
        assert len(resnet_blocks) == len(hidden_size_mult), (
            f"resnet_blocks {resnet_blocks} must have the same length as "
            f"hidden_size_mult {hidden_size_mult}"
        )

        self.num_resolutions = len(hidden_size_mult)
        self.resolution = resolution
        self.num_res_blocks = num_res_blocks

        self.conv_in = resolve_str_to_obj(conv_in)(
            3, hidden_size, kernel_size=3, stride=1, padding=1
        )

        curr_res = resolution
        in_ch_mult = (1,) + tuple(hidden_size_mult)
        self.in_ch_mult = in_ch_mult
        self.down = nn.ModuleList()
        for i_level in range(self.num_resolutions):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_in = hidden_size * in_ch_mult[i_level]
            block_out = hidden_size * hidden_size_mult[i_level]
            for i_block in range(self.num_res_blocks):
                block.append(
                    resolve_str_to_obj(resnet_blocks[i_level])(
                        in_channels=block_in,
                        out_channels=block_out,
                        dropout=dropout,
                    )
                )
                block_in = block_out
                if curr_res in attn_resolutions:
                    attn.append(resolve_str_to_obj(attention)(block_in))
            down = nn.Module()
            down.block = block
            down.attn = attn
            if spatial_downsample[i_level]:
                down.downsample = resolve_str_to_obj(spatial_downsample[i_level])(
                    block_in, block_in
                )
                curr_res = curr_res // 2
            self.down.append(down)

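    # Returns (h, h_): the bottleneck feature plus the per-level features captured just
    # before each spatial downsample, ordered from the highest to the lowest resolution.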
    def forward(self, x):
        h = self.conv_in(x)
        h_ = []
        for i_level in range(self.num_resolutions):
            for i_block in range(self.num_res_blocks):
                h = self.down[i_level].block[i_block](h)
                if len(self.down[i_level].attn) > 0:
                    h = self.down[i_level].attn[i_block](h)
            if hasattr(self.down[i_level], "downsample"):
                h_.append(h)
                h = self.down[i_level].downsample(h)

        return h, h_

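
# Decoder mirroring the Encoder: residual blocks (optionally with attention) per level,
# spatial upsampling between levels, and a `skip` convolution that fuses the matching
# pre-downsample feature from the Encoder after each upsample.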
class Decoder(nn.Module):
    def __init__(
        self,
        hidden_size: int,
        hidden_size_mult: Tuple[int] = (1, 2, 4, 4),
        attn_resolutions: Tuple[int] = (16,),
        conv_out: Module = "CausalConv3d",
        attention: Module = "AttnBlock",
        resnet_blocks: Tuple[Module] = (
            "ResnetBlock3D",
            "ResnetBlock3D",
            "ResnetBlock3D",
            "ResnetBlock3D",
        ),
        spatial_upsample: Tuple[Module] = (
            "",
            "SpatialUpsample2x",
            "SpatialUpsample2x",
            "SpatialUpsample2x",
        ),
        dropout: float = 0.0,
        resolution: int = 256,
        num_res_blocks: int = 2,
    ):
        super().__init__()

        self.num_resolutions = len(hidden_size_mult)
        self.resolution = resolution
        self.num_res_blocks = num_res_blocks

        block_in = hidden_size * hidden_size_mult[self.num_resolutions - 1]
        curr_res = resolution // 2 ** (self.num_resolutions - 1)

        self.up = nn.ModuleList()
        for i_level in reversed(range(self.num_resolutions)):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            skip = nn.ModuleList()
            block_out = hidden_size * hidden_size_mult[i_level]
            for i_block in range(self.num_res_blocks):
                block.append(
                    resolve_str_to_obj(resnet_blocks[i_level])(
                        in_channels=block_in,
                        out_channels=block_out,
                        dropout=dropout,
                    )
                )
                block_in = block_out
                if curr_res in attn_resolutions:
                    attn.append(resolve_str_to_obj(attention)(block_in))
            up = nn.Module()
            up.block = block
            up.attn = attn
            up.skip = skip
            if spatial_upsample[i_level]:
                up.upsample = resolve_str_to_obj(spatial_upsample[i_level])(
                    block_in, block_in
                )
                up.skip = resolve_str_to_obj(conv_out)(
                    block_in + hidden_size * hidden_size_mult[i_level - 1],
                    block_in,
                    kernel_size=3,
                    padding=1,
                )
                curr_res = curr_res * 2
            self.up.insert(0, up)

        self.norm_out = Normalize(block_in)
        self.conv_out = resolve_str_to_obj(conv_out)(
            block_in, 3, kernel_size=3, padding=1
        )

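    # `h` is the bottleneck feature and `h_` the list of encoder skip features; each
    # upsampled level is concatenated with its skip and fused through that level's
    # `skip` convolution before the final normalization and output projection.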
    def forward(self, h, h_):
        for i_level in reversed(range(self.num_resolutions)):
            for i_block in range(self.num_res_blocks):
                h = self.up[i_level].block[i_block](h)
                if len(self.up[i_level].attn) > 0:
                    h = self.up[i_level].attn[i_block](h)
            if hasattr(self.up[i_level], "upsample"):
                h = self.up[i_level].upsample(h)
                h = torch.concat([h_[i_level - 1], h], dim=1)
                h = self.up[i_level].skip(h)

        h = self.norm_out(h)
        h = nonlinearity(h)
        h = self.conv_out(h)
        return h

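
# Residual refinement autoencoder: the Encoder/Decoder pair predicts a correction that
# is added back onto the input video (`dec + input`). The tiled encode/decode helpers
# split large inputs temporally and spatially and blend the overlapping tile borders.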
class Refiner(VideoBaseAE):

    @register_to_config
    def __init__(
        self,
        hidden_size: int = 128,
        hidden_size_mult: Tuple[int] = (1, 2, 4, 4),
        attn_resolutions: Tuple[int] = [],
        dropout: float = 0.0,
        resolution: int = 256,
        num_res_blocks: int = 2,
        encoder_conv_in: Module = "CausalConv3d",
        encoder_attention: Module = "AttnBlock3D",
        encoder_resnet_blocks: Tuple[Module] = (
            "ResnetBlock3D",
            "ResnetBlock3D",
            "ResnetBlock3D",
            "ResnetBlock3D",
        ),
        encoder_spatial_downsample: Tuple[Module] = (
            "SpatialDownsample2x",
            "SpatialDownsample2x",
            "SpatialDownsample2x",
            "",
        ),
        decoder_conv_out: Module = "CausalConv3d",
        decoder_attention: Module = "AttnBlock3D",
        decoder_resnet_blocks: Tuple[Module] = (
            "ResnetBlock3D",
            "ResnetBlock3D",
            "ResnetBlock3D",
            "ResnetBlock3D",
        ),
        decoder_spatial_upsample: Tuple[Module] = (
            "",
            "SpatialUpsample2x",
            "SpatialUpsample2x",
            "SpatialUpsample2x",
        ),
    ) -> None:
        super().__init__()

        self.tile_sample_min_size = 256
        self.tile_sample_min_size_t = 65
        self.tile_latent_min_size = int(
            self.tile_sample_min_size / (2 ** (len(hidden_size_mult) - 1))
        )
        # `tile_latent_min_size_t` is read by `decode`/`tiled_decode`; the default blocks
        # only downsample spatially, so the latent keeps the temporal tile size.
        self.tile_latent_min_size_t = self.tile_sample_min_size_t
        self.tile_overlap_factor = 0.25
        self.use_tiling = False

        self.encoder = Encoder(
            hidden_size=hidden_size,
            hidden_size_mult=hidden_size_mult,
            attn_resolutions=attn_resolutions,
            conv_in=encoder_conv_in,
            attention=encoder_attention,
            resnet_blocks=encoder_resnet_blocks,
            spatial_downsample=encoder_spatial_downsample,
            dropout=dropout,
            resolution=resolution,
            num_res_blocks=num_res_blocks,
        )

        self.decoder = Decoder(
            hidden_size=hidden_size,
            hidden_size_mult=hidden_size_mult,
            attn_resolutions=attn_resolutions,
            conv_out=decoder_conv_out,
            attention=decoder_attention,
            resnet_blocks=decoder_resnet_blocks,
            spatial_upsample=decoder_spatial_upsample,
            dropout=dropout,
            resolution=resolution,
            num_res_blocks=num_res_blocks,
        )

    def get_encoder(self):
        return [self.encoder]

    def get_decoder(self):
        return [self.decoder]

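    # encode/decode fall back to the tiled variants when tiling is enabled and the
    # input exceeds the configured spatial or temporal tile sizes.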
    def encode(self, x):
        if self.use_tiling and (
            x.shape[-1] > self.tile_sample_min_size
            or x.shape[-2] > self.tile_sample_min_size
            or x.shape[-3] > self.tile_sample_min_size_t
        ):
            return self.tiled_encode(x)
        enc = self.encoder(x)
        return enc

    def decode(self, z):
        if self.use_tiling and (
            z.shape[-1] > self.tile_latent_min_size
            or z.shape[-2] > self.tile_latent_min_size
            or z.shape[-3] > self.tile_latent_min_size_t
        ):
            return self.tiled_decode(z)
        dec = self.decoder(z)
        return dec

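    # The refiner is residual: the decoder output is added back onto the input video.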
    def forward(self, input):
        enc, enc_ = self.encoder(input)
        dec = self.decoder(enc, enc_)
        return dec + input

    def on_train_start(self):
        # `save_ema` is expected to be set by the training wrapper; default to False.
        self.ema = deepcopy(self) if getattr(self, "save_ema", False) else None

    def get_last_layer(self):
        if hasattr(self.decoder.conv_out, "conv"):
            return self.decoder.conv_out.conv.weight
        else:
            return self.decoder.conv_out.weight

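    # blend_v/blend_h linearly cross-fade the overlapping border rows/columns of two
    # neighboring tiles so tiled reconstructions do not show seams.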
    def blend_v(
        self, a: torch.Tensor, b: torch.Tensor, blend_extent: int
    ) -> torch.Tensor:
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for y in range(blend_extent):
            b[:, :, :, y, :] = a[:, :, :, -blend_extent + y, :] * (
                1 - y / blend_extent
            ) + b[:, :, :, y, :] * (y / blend_extent)
        return b

    def blend_h(
        self, a: torch.Tensor, b: torch.Tensor, blend_extent: int
    ) -> torch.Tensor:
        blend_extent = min(a.shape[4], b.shape[4], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, :, x] = a[:, :, :, :, -blend_extent + x] * (
                1 - x / blend_extent
            ) + b[:, :, :, :, x] * (x / blend_extent)
        return b

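    # tiled_encode/tiled_decode split the clip into temporal chunks that overlap by one
    # frame; the first frame of every chunk after the first is dropped again when the
    # per-chunk results are concatenated along the time axis.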
    def tiled_encode(self, x):
        t = x.shape[2]
        t_chunk_idx = [i for i in range(0, t, self.tile_sample_min_size_t - 1)]
        if len(t_chunk_idx) == 1 and t_chunk_idx[0] == 0:
            t_chunk_start_end = [[0, t]]
        else:
            t_chunk_start_end = [
                [t_chunk_idx[i], t_chunk_idx[i + 1] + 1]
                for i in range(len(t_chunk_idx) - 1)
            ]
            if t_chunk_start_end[-1][-1] > t:
                t_chunk_start_end[-1][-1] = t
            elif t_chunk_start_end[-1][-1] < t:
                last_start_end = [t_chunk_idx[-1], t]
                t_chunk_start_end.append(last_start_end)
        moments = []
        for idx, (start, end) in enumerate(t_chunk_start_end):
            chunk_x = x[:, :, start:end]
            if idx != 0:
                moment = self.tiled_encode2d(chunk_x, return_moments=True)[:, :, 1:]
            else:
                moment = self.tiled_encode2d(chunk_x, return_moments=True)
            moments.append(moment)
        moments = torch.cat(moments, dim=2)
        return moments

    def tiled_decode(self, x):
        t = x.shape[2]
        t_chunk_idx = [i for i in range(0, t, self.tile_latent_min_size_t - 1)]
        if len(t_chunk_idx) == 1 and t_chunk_idx[0] == 0:
            t_chunk_start_end = [[0, t]]
        else:
            t_chunk_start_end = [
                [t_chunk_idx[i], t_chunk_idx[i + 1] + 1]
                for i in range(len(t_chunk_idx) - 1)
            ]
            if t_chunk_start_end[-1][-1] > t:
                t_chunk_start_end[-1][-1] = t
            elif t_chunk_start_end[-1][-1] < t:
                last_start_end = [t_chunk_idx[-1], t]
                t_chunk_start_end.append(last_start_end)
        dec_ = []
        for idx, (start, end) in enumerate(t_chunk_start_end):
            chunk_x = x[:, :, start:end]
            if idx != 0:
                dec = self.tiled_decode2d(chunk_x)[:, :, 1:]
            else:
                dec = self.tiled_decode2d(chunk_x)
            dec_.append(dec)
        dec_ = torch.cat(dec_, dim=2)
        return dec_

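    # tiled_encode2d/tiled_decode2d tile the input spatially with overlapping crops,
    # run the encoder/decoder per tile, blend the overlaps with blend_v/blend_h and
    # stitch the cropped results back together.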
    def tiled_encode2d(self, x, return_moments=False):
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        rows = []
        for i in range(0, x.shape[3], overlap_size):
            row = []
            for j in range(0, x.shape[4], overlap_size):
                tile = x[
                    :,
                    :,
                    :,
                    i : i + self.tile_sample_min_size,
                    j : j + self.tile_sample_min_size,
                ]
                tile = self.encoder(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=4))

        moments = torch.cat(result_rows, dim=3)
        # `tiled_encode` asks for the raw moments and concatenates them along time.
        if return_moments:
            return moments
        posterior = DiagonalGaussianDistribution(moments)
        return posterior

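    # Mirror of tiled_encode2d: tiles are cut with the latent tile size, while the
    # blend extent and crop limit are expressed in pixel space of the decoded output.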
    def tiled_decode2d(self, z):
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        rows = []
        for i in range(0, z.shape[3], overlap_size):
            row = []
            for j in range(0, z.shape[4], overlap_size):
                tile = z[
                    :,
                    :,
                    :,
                    i : i + self.tile_latent_min_size,
                    j : j + self.tile_latent_min_size,
                ]
                # The refiner does not define a post-quant convolution; guard the
                # attribute lookup so the tiled path also works without one.
                if getattr(self, "use_quant_layer", False):
                    tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=4))

        dec = torch.cat(result_rows, dim=3)
        return dec

    def enable_tiling(self, use_tiling: bool = True):
        self.use_tiling = use_tiling

    def disable_tiling(self):
        self.enable_tiling(False)

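    # Loads weights from a training checkpoint, preferring the EMA weights when present
    # (unless disabled via the NOT_USE_EMA_MODEL environment variable) and falling back
    # to the regular state dict; keys matching `ignore_keys` prefixes are dropped.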
    def init_from_ckpt(self, path, ignore_keys=list()):
        sd = torch.load(path, map_location="cpu")
        print("init from " + path)

if "ema_state_dict" in sd and len(sd['ema_state_dict']) > 0 and os.environ.get("NOT_USE_EMA_MODEL", 0) == 0: |
|
print("Load from ema model!") |
|
sd = sd["ema_state_dict"] |
|
sd = {key.replace("module.", ""): value for key, value in sd.items()} |
|
elif "state_dict" in sd: |
|
print("Load from normal model!") |
|
if "gen_model" in sd["state_dict"]: |
|
sd = sd["state_dict"]["gen_model"] |
|
else: |
|
sd = sd["state_dict"] |
|
|
|
keys = list(sd.keys()) |
|
|
|
for k in keys: |
|
for ik in ignore_keys: |
|
if k.startswith(ik): |
|
print("Deleting key {} from state_dict.".format(k)) |
|
del sd[k] |
|
|
|
self.load_state_dict(sd, strict=True) |
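

# Minimal smoke-test sketch (an assumption for illustration, not part of the training
# pipeline): it presumes the registered block names used above ("CausalConv3d",
# "AttnBlock3D", "ResnetBlock3D", "SpatialDownsample2x", "SpatialUpsample2x") resolve
# via resolve_str_to_obj, and it must be run as a module inside the package because of
# the relative imports.
if __name__ == "__main__":
    refiner = Refiner(hidden_size=32, num_res_blocks=1)
    # (batch, channels, frames, height, width); the spatial size should be divisible
    # by 8 because of the three spatial downsamples in the default config.
    video = torch.randn(1, 3, 9, 64, 64)
    with torch.no_grad():
        refined = refiner(video)
    print(refined.shape)  # expected to match the input shape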