Upload 3 files
- configs/data_configs.py +41 -0
- configs/paths_config.py +28 -0
- configs/transforms_config.py +62 -0
configs/data_configs.py
ADDED
@@ -0,0 +1,41 @@
from configs import transforms_config
from configs.paths_config import dataset_paths


DATASETS = {
    'ffhq_encode': {
        'transforms': transforms_config.EncodeTransforms,
        'train_source_root': dataset_paths['ffhq'],
        'train_target_root': dataset_paths['ffhq'],
        'test_source_root': dataset_paths['celeba_test'],
        'test_target_root': dataset_paths['celeba_test'],
    },
    'cars_encode': {
        'transforms': transforms_config.CarsEncodeTransforms,
        'train_source_root': dataset_paths['cars_train'],
        'train_target_root': dataset_paths['cars_train'],
        'test_source_root': dataset_paths['cars_test'],
        'test_target_root': dataset_paths['cars_test'],
    },
    'horse_encode': {
        'transforms': transforms_config.EncodeTransforms,
        'train_source_root': dataset_paths['horse_train'],
        'train_target_root': dataset_paths['horse_train'],
        'test_source_root': dataset_paths['horse_test'],
        'test_target_root': dataset_paths['horse_test'],
    },
    'church_encode': {
        'transforms': transforms_config.EncodeTransforms,
        'train_source_root': dataset_paths['church_train'],
        'train_target_root': dataset_paths['church_train'],
        'test_source_root': dataset_paths['church_test'],
        'test_target_root': dataset_paths['church_test'],
    },
    'cats_encode': {
        'transforms': transforms_config.EncodeTransforms,
        'train_source_root': dataset_paths['cats_train'],
        'train_target_root': dataset_paths['cats_train'],
        'test_source_root': dataset_paths['cats_test'],
        'test_target_root': dataset_paths['cats_test'],
    }
}
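
A minimal usage sketch for the DATASETS mapping above. The opts namespace and its dataset_type field are assumptions for illustration, not part of this commit:

# Hypothetical sketch: look up a dataset entry and build its transforms.
# `opts` is assumed to be an argparse-style namespace with a `dataset_type` field.
from configs.data_configs import DATASETS

def load_dataset_args(opts):
    if opts.dataset_type not in DATASETS:
        raise ValueError(f'{opts.dataset_type} is not a valid dataset_type')
    dataset_args = DATASETS[opts.dataset_type]
    # Each entry stores a transforms class; instantiate it to get the transform dict.
    transforms_dict = dataset_args['transforms'](opts).get_transforms()
    return dataset_args, transforms_dict
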
configs/paths_config.py
ADDED
@@ -0,0 +1,28 @@
dataset_paths = {
    # Face Datasets (In the paper: FFHQ - train, CelebAHQ - test)
    'ffhq': '',
    'celeba_test': '',

    # Cars Dataset (In the paper: Stanford cars)
    'cars_train': '',
    'cars_test': '',

    # Horse Dataset (In the paper: LSUN Horse)
    'horse_train': '',
    'horse_test': '',

    # Church Dataset (In the paper: LSUN Church)
    'church_train': '',
    'church_test': '',

    # Cats Dataset (In the paper: LSUN Cat)
    'cats_train': '',
    'cats_test': ''
}

model_paths = {
    'stylegan_ffhq': 'pretrained_models/stylegan2-ffhq-config-f.pt',
    'ir_se50': 'pretrained_models/model_ir_se50.pth',
    'shape_predictor': 'pretrained_models/shape_predictor_68_face_landmarks.dat',
    'moco': 'pretrained_models/moco_v2_800ep_pretrain.pth'
}
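
The dataset_paths entries are deliberately left empty and are meant to be filled in with local dataset locations. As a hedged sketch (not code from this commit), a checkpoint listed in model_paths can be loaded with torch.load once the file has been downloaded to pretrained_models/:

# Sketch only: load one of the pretrained checkpoints referenced above.
import torch
from configs.paths_config import model_paths

ir_se50_weights = torch.load(model_paths['ir_se50'], map_location='cpu')
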
configs/transforms_config.py
ADDED
@@ -0,0 +1,62 @@
from abc import abstractmethod
import torchvision.transforms as transforms


class TransformsConfig(object):

    def __init__(self, opts):
        self.opts = opts

    @abstractmethod
    def get_transforms(self):
        pass


class EncodeTransforms(TransformsConfig):

    def __init__(self, opts):
        super(EncodeTransforms, self).__init__(opts)

    def get_transforms(self):
        transforms_dict = {
            'transform_gt_train': transforms.Compose([
                transforms.Resize((256, 256)),
                transforms.RandomHorizontalFlip(0.5),
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),
            'transform_source': None,
            'transform_test': transforms.Compose([
                transforms.Resize((256, 256)),
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),
            'transform_inference': transforms.Compose([
                transforms.Resize((256, 256)),
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
        }
        return transforms_dict


class CarsEncodeTransforms(TransformsConfig):

    def __init__(self, opts):
        super(CarsEncodeTransforms, self).__init__(opts)

    def get_transforms(self):
        transforms_dict = {
            'transform_gt_train': transforms.Compose([
                transforms.Resize((192, 256)),
                transforms.RandomHorizontalFlip(0.5),
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),
            'transform_source': None,
            'transform_test': transforms.Compose([
                transforms.Resize((192, 256)),
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),
            'transform_inference': transforms.Compose([
                transforms.Resize((192, 256)),
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
        }
        return transforms_dict
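
A short usage sketch for the transforms classes above. Assumptions: opts can be any object (the base class only stores it), the input is a local PIL image, and 'example.jpg' is a placeholder filename:

# Sketch: build the transform dictionary and preprocess a single image.
from PIL import Image
from configs.transforms_config import EncodeTransforms

transforms_dict = EncodeTransforms(opts=None).get_transforms()
img = Image.open('example.jpg').convert('RGB')   # assumed local file
tensor = transforms_dict['transform_test'](img)  # 3 x 256 x 256 tensor, values in [-1, 1]
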