Commit 8ee3c60
Parent(s): 9f2baec
Upload sd-superres-config.yaml with huggingface_hub
sd-superres-config.yaml +181 -0
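For reference, a config like this is typically pushed with the huggingface_hub client, as the commit message indicates. A minimal sketch of such an upload; the repo id and token handling shown here are assumptions, not taken from this commit:

from huggingface_hub import HfApi

api = HfApi()  # picks up the token from `huggingface-cli login` if one is configured
api.upload_file(
    path_or_fileobj="sd-superres-config.yaml",   # local file to upload
    path_in_repo="sd-superres-config.yaml",      # destination path inside the repo
    repo_id="your-username/your-repo",           # placeholder repo id (assumption)
    commit_message="Upload sd-superres-config.yaml with huggingface_hub",
)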
sd-superres-config.yaml
ADDED
@@ -0,0 +1,181 @@
model:
  base_learning_rate: 1.0e-05
  target: ldm.models.diffusion.ddpm.LatentUpscaleDiffusion
  params:
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "jpg"
    cond_stage_key: "txt"
    image_size: 64
    channels: 4
    cond_stage_trainable: false  # Note: different from the one we trained before
    conditioning_key: "hybrid-adm"
    monitor: val/loss_simple_ema
    scale_factor: 0.18215
    low_scale_key: "lr"

    low_scale_config:
      target: ldm.modules.encoders.modules.LowScaleEncoder
      params:
        scale_factor: 0.18215
        linear_start: 0.00085
        linear_end: 0.0120
        timesteps: 1000
        max_noise_level: 100
        output_size: null
        model_config:
          target: ldm.models.autoencoder.AutoencoderKL
          params:
            embed_dim: 4
            monitor: val/rec_loss
            ckpt_path: "models/first_stage_models/kl-f8/model.ckpt"
            ddconfig:
              double_z: true
              z_channels: 4
              resolution: 256
              in_channels: 3
              out_ch: 3
              ch: 128
              ch_mult:
                - 1
                - 2
                - 4
                - 4
              num_res_blocks: 2
              attn_resolutions: [ ]
              dropout: 0.0
            lossconfig:
              target: torch.nn.Identity

    scheduler_config: # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 5000 ]  # NOTE for resuming. use 10000 if starting from scratch
        cycle_lengths: [ 10000000000000 ]  # incredibly large number to prevent corner cases
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        num_classes: 1000
        image_size: 32  # unused
        in_channels: 8
        out_channels: 4
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: True
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ckpt_path: "models/first_stage_models/kl-f8/model.ckpt"
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
            - 1
            - 2
            - 4
            - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder


data:
  target: ldm.data.laion.WebDataModuleFromConfig
  params:
    tar_base: "pipe:ssh -i ~/.ssh/id_rsa [email protected] cat /mnt/data_rome/laion/improved_aesthetics_6plus/ims"
    batch_size: 3
    num_workers: 2
    multinode: True
    train:
      shards: '{00000..01209}.tar'
      shuffle: 10000
      image_key: jpg
      image_transforms:
        - target: torchvision.transforms.Resize
          params:
            size: 512
            interpolation: 3
        - target: torchvision.transforms.RandomCrop
          params:
            size: 512
      postprocess:
        target: ldm.data.laion.AddLR
        params:
          factor: 4
          output_size: 512

    # NOTE use enough shards to avoid empty validation loops in workers
    validation:
      shards: '{00000..00012}.tar'
      shuffle: 0
      image_key: jpg
      image_transforms:
        - target: torchvision.transforms.Resize
          params:
            size: 512
            interpolation: 3
        - target: torchvision.transforms.CenterCrop
          params:
            size: 512
      postprocess:
        target: ldm.data.laion.AddLR
        params:
          factor: 4
          output_size: 512


lightning:
  find_unused_parameters: False

  modelcheckpoint:
    params:
      every_n_train_steps: 5000

  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 1000
        max_images: 4
        increase_log_steps: False
        log_first_step: False
        log_images_kwargs:
          use_ema_scope: False
          inpaint: False
          plot_progressive_rows: False
          plot_diffusion_rows: False
          N: 4
          unconditional_guidance_scale: 3.0
          unconditional_guidance_label: [""]

  trainer:
    benchmark: True
    val_check_interval: 5000000  # really sorry
    num_sanity_val_steps: 0
    accumulate_grad_batches: 4
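The file follows the usual target/params layout of the CompVis ldm codebase, so each block can be instantiated in the standard way. A minimal sketch of loading the model section, assuming the ldm package is importable and the first-stage checkpoint referenced under ckpt_path exists locally:

from omegaconf import OmegaConf
from ldm.util import instantiate_from_config

# Load the YAML and build the LatentUpscaleDiffusion model from its `model` block;
# `instantiate_from_config` reads the `target` class path and passes `params` to it.
config = OmegaConf.load("sd-superres-config.yaml")
model = instantiate_from_config(config.model)

Training is normally launched through the repo's main.py entry point with --base pointing at this file (exact flags depend on the repo version), which also wires up the data and lightning sections above.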