import math

import torch
import torch.nn as nn


class UpsampleBlock(nn.Module):
    """Doubles spatial resolution: 3x3 conv, 2x transposed conv, 3x3 conv."""

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
    ):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        # feature mixing at the incoming resolution
        self.conv_in = nn.Sequential(
            nn.Conv2d(in_channels, in_channels, kernel_size=3, padding=1),
            nn.GELU()
        )
        # learned 2x upsample: the stride-2 transposed conv doubles H and W
        self.conv_up = nn.Sequential(
            nn.ConvTranspose2d(in_channels, out_channels, kernel_size=2, stride=2),
            nn.GELU()
        )
        # output projection (no activation)
        self.conv_out = nn.Sequential(
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1)
        )

    def forward(self, x):
        x = self.conv_in(x)
        x = self.conv_up(x)
        x = self.conv_out(x)
        return x
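

# Shape sketch (illustrative, using the default CLIPImagePreProcessor sizes
# below): UpsampleBlock(768, 192) maps (B, 768, 56, 56) -> (B, 192, 112, 112);
# the stride-2 transposed conv doubles height and width while the channel
# count drops to match the next pixel-unshuffle residual.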


class CLIPImagePreProcessor(nn.Module):
    """Learns to downscale a large image (input_size) to a CLIP-sized image
    (clip_input_size), blended with a plain average-pooled resize."""

    def __init__(
        self,
        input_size: int = 896,
        clip_input_size: int = 224,
        downscale_factor: int = 16,
    ):
        super().__init__()

        assert input_size % clip_input_size == 0, \
            "input_size must be an integer multiple of clip_input_size"
        in_channels = 3

        self.input_size = input_size
        self.clip_input_size = clip_input_size
        self.downscale_factor = downscale_factor

        # PixelUnshuffle(r) converts (3, H, W) to (3 * r**2, H / r, W / r)
        subpixel_channels = in_channels * downscale_factor ** 2
        channels = subpixel_channels

        # the unshuffled grid is input_size / downscale_factor on a side and
        # must grow by this factor (a power of two) to reach clip_input_size
        upscale_factor = downscale_factor // (input_size // clip_input_size)

        # each UpsampleBlock doubles the resolution, so log2(upscale_factor)
        # blocks bring the grid up to clip_input_size
        num_upsample_blocks = int(math.log2(upscale_factor))

        # build the upsampling trunk; each block halves the effective
        # downscale and is paired with a PixelUnshuffle of the raw image that
        # produces a residual of exactly matching shape
        self.upsample_blocks = nn.ModuleList()
        self.subpixel_blocks = nn.ModuleList()
        current_channels = channels
        current_downscale = downscale_factor
        for _ in range(num_upsample_blocks):
            output_downscale = current_downscale // 2
            out_channels = in_channels * output_downscale ** 2
            self.upsample_blocks.append(UpsampleBlock(current_channels, out_channels))
            current_channels = out_channels
            current_downscale = output_downscale
            self.subpixel_blocks.append(nn.PixelUnshuffle(current_downscale))

        # project the final feature map back to a 3-channel image
        self.conv_out = nn.Conv2d(
            current_channels,
            out_channels=3,
            kernel_size=3,
            padding=1
        )

        # non-learned reference path: average-pool the input straight down
        # to clip_input_size
        kernel_size = input_size // clip_input_size
        self.res_down = nn.AvgPool2d(
            kernel_size=kernel_size,
            stride=kernel_size
        )

        # learned blend weight, initialized near zero so the module starts
        # out as (almost) a plain resize
        self.res_blend = nn.Parameter(torch.tensor(0.001))

        # space-to-depth entry point for the learned path
        self.unshuffle = nn.PixelUnshuffle(downscale_factor)

        self.conv_in = nn.Sequential(
            nn.Conv2d(
                subpixel_channels,
                channels,
                kernel_size=3,
                padding=1
            ),
            nn.GELU()
        )

    def forward(self, x):
        # work at the fixed training resolution so every residual path below
        # matches the upsample path spatially
        x = nn.functional.interpolate(x, size=(self.input_size, self.input_size), mode='bicubic')
        inputs = x

        # plain average-pooled resize: the base the learned path blends into
        res = self.res_down(inputs)

        # space-to-depth, then feature mixing at the lowest resolution
        x = self.unshuffle(x)
        x = self.conv_in(x)

        # progressively upsample, injecting a pixel-unshuffled copy of the
        # image as a residual at each intermediate scale
        for up, subpixel in zip(self.upsample_blocks, self.subpixel_blocks):
            x = up(x)
            block_res = subpixel(inputs)
            x = x + block_res
        x = self.conv_out(x)

        # blend the learned result into the pooled resize; res_blend starts
        # near zero, so training begins close to a plain resize
        x = x * self.res_blend + res
        return x
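

# Quick smoke test (an illustrative sketch, not part of the module itself):
# with the defaults, an 896x896 RGB batch should come out CLIP-sized at
# 224x224.
if __name__ == "__main__":
    model = CLIPImagePreProcessor()
    image = torch.randn(1, 3, 896, 896)  # one random RGB image
    out = model(image)
    print(out.shape)  # expected: torch.Size([1, 3, 224, 224])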