import os
from os.path import basename, splitext
from pathlib import Path

import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
from PIL import Image
from torchvision.utils import save_image

from models.pix2pix_model import Pix2PixModel
from options.test_options import TestOptions

opt = TestOptions().parse()
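
# Example invocation (a sketch only; the exact flags depend on how TestOptions is defined in
# this repository, and the script name is a placeholder). The flags used here (--content,
# --content_dir, --style, --style_dir, --output_dir, --a) are assumed from their uses below:
#
#   python test.py --content inputs/content.jpg --style inputs/style.jpg \
#       --output_dir results --a 1.0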


def test_transform(size, crop):
    """Resize the shorter side to `size` (if nonzero), optionally center-crop to size x size, then convert to a tensor."""
    transform_list = []
    if size != 0:
        transform_list.append(transforms.Resize(size))
    if crop:
        transform_list.append(transforms.CenterCrop(size))
    transform_list.append(transforms.ToTensor())
    return transforms.Compose(transform_list)
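
# A minimal sketch of what test_transform produces with the settings used below (the file name
# is a placeholder): Resize(512) maps the shorter side to 512 and CenterCrop(512) then yields a
# square, so the result is a 3 x 512 x 512 float tensor in [0, 1].
#
#   tf = test_transform(512, True)
#   img = tf(Image.open('example.jpg').convert('RGB'))
#   # img.shape == torch.Size([3, 512, 512])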


def style_transform(h, w):
    """Center-crop a style image to (h, w) and convert it to a tensor."""
    transform_list = [
        transforms.CenterCrop((h, w)),
        transforms.ToTensor(),
    ]
    return transforms.Compose(transform_list)


def content_transform():
    """Convert a content image to a tensor without resizing or cropping."""
    return transforms.Compose([transforms.ToTensor()])


content_size = 512
style_size = 512
crop = True                # apply center-cropping inside test_transform
save_ext = '.jpg'
output_path = opt.output_dir
preserve_color = True      # not used below
alpha = opt.a              # not used below

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# Collect content and style paths: a single image if given, otherwise every file in the directory.
if opt.content:
    content_paths = [Path(opt.content)]
else:
    content_dir = Path(opt.content_dir)
    content_paths = [f for f in content_dir.glob('*')]

if opt.style:
    style_paths = [Path(opt.style)]
else:
    style_dir = Path(opt.style_dir)
    style_paths = [f for f in style_dir.glob('*')]

os.makedirs(output_path, exist_ok=True)

# Build the model from the parsed options; Pix2PixModel is assumed to restore its own weights.
network = Pix2PixModel(opt)
print(network)
network.eval()
network.to(device)

content_tf = test_transform(content_size, crop)
style_tf = test_transform(style_size, crop)
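
# For a directory layout such as inputs/content/{a.jpg, b.jpg} and inputs/style/{s1.jpg, s2.jpg}
# (placeholder paths), the nested loops below write one stylized image per (content, style)
# pair, i.e. four outputs in this example.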

for content_path in content_paths:
    for style_path in style_paths:
        print(content_path)

        content_tf1 = content_transform()    # built but not applied below
        content = content_tf(Image.open(content_path).convert('RGB'))

        c, h, w = content.shape              # ToTensor yields (C, H, W)
        style_tf1 = style_transform(h, w)    # built but not applied below
        style = style_tf(Image.open(style_path).convert('RGB'))

        style = style.to(device).unsqueeze(0)
        content = content.to(device).unsqueeze(0)

        # The generator expects fixed 224 x 224 inputs.
        contents = F.interpolate(content, size=(224, 224), mode='bilinear', align_corners=False)
        styles = F.interpolate(style, size=(224, 224), mode='bilinear', align_corners=False)

        with torch.no_grad():
            # In inference mode the wrapper is assumed to return a callable generator,
            # which is then applied to the resized content/style pair.
            model_out = network(data=None, mode='inference', iters=0, progress=None,
                                epochs=None, images_iters=None)
            _, _, _, _, output = model_out(contents, styles)
        print('OUTPUT', output.shape)

        # Upsample the stylized result by a factor of 8/7 (e.g. 224 to 256) before saving.
        upsample = nn.Upsample(scale_factor=8 / 7, mode='bilinear', align_corners=False)
        fake_image = upsample(output)

        output_name = '{:s}/{:s}_stylized_{:s}{:s}'.format(
            output_path, splitext(basename(content_path))[0],
            splitext(basename(style_path))[0], save_ext
        )
        save_image(fake_image, output_name)
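
# With the naming scheme above, stylizing content 'lenna.jpg' with style 'starry_night.jpg'
# (placeholder file names) would write <output_dir>/lenna_stylized_starry_night.jpg.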