Upload 2 files
- args.json (+60 -0)
- v1-inference.yaml (+70 -0)
args.json
ADDED
@@ -0,0 +1,60 @@
{
  "pretrained_model_name_or_path": "runwayml/stable-diffusion-v1-5",
  "pretrained_vae_name_or_path": "stabilityai/sd-vae-ft-mse",
  "revision": "fp16",
  "tokenizer_name": null,
  "instance_data_dir": null,
  "class_data_dir": null,
  "instance_prompt": null,
  "class_prompt": null,
  "save_sample_prompt": "photo of melaura person",
  "save_sample_negative_prompt": null,
  "n_save_sample": 4,
  "save_guidance_scale": 7.5,
  "save_infer_steps": 20,
  "pad_tokens": false,
  "with_prior_preservation": true,
  "prior_loss_weight": 1.0,
  "num_class_images": 460,
  "output_dir": "/content/stable_diffusion_models/melaura",
  "seed": 1337,
  "resolution": 512,
  "center_crop": false,
  "train_text_encoder": true,
  "train_batch_size": 1,
  "sample_batch_size": 4,
  "num_train_epochs": 11,
  "max_train_steps": 5040,
  "gradient_accumulation_steps": 1,
  "gradient_checkpointing": false,
  "learning_rate": 1e-06,
  "scale_lr": false,
  "lr_scheduler": "constant",
  "lr_warmup_steps": 0,
  "use_8bit_adam": true,
  "adam_beta1": 0.9,
  "adam_beta2": 0.999,
  "adam_weight_decay": 0.01,
  "adam_epsilon": 1e-08,
  "max_grad_norm": 1.0,
  "push_to_hub": false,
  "hub_token": null,
  "hub_model_id": null,
  "logging_dir": "logs",
  "log_interval": 10,
  "save_interval": 10000,
  "save_min_steps": 0,
  "mixed_precision": "fp16",
  "not_cache_latents": false,
  "hflip": false,
  "local_rank": -1,
  "concepts_list": [
    {
      "instance_prompt": "photo of melaura person",
      "class_prompt": "photo of person",
      "instance_data_dir": "/content/data/training_images",
      "class_data_dir": "/content/data/person"
    }
  ],
  "read_prompts_from_txts": false
}
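This file looks like the argument dump a DreamBooth training script writes alongside its outputs, so it can double as a record for reproducing a run. A minimal sketch of reading it back follows; the load_args helper is hypothetical and not part of this commit, it only assumes args.json sits in the working directory.

import json
from argparse import Namespace

def load_args(path: str) -> Namespace:
    # Read the saved argument dump back into an argparse-style Namespace,
    # so the values can be accessed as attributes like the original args.
    with open(path) as f:
        return Namespace(**json.load(f))

args = load_args("args.json")
# e.g. recompute the effective batch size from the saved settings
effective_batch = args.train_batch_size * args.gradient_accumulation_steps
print(args.pretrained_model_name_or_path, args.max_train_steps, effective_batch)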
v1-inference.yaml
ADDED
@@ -0,0 +1,70 @@
model:
  base_learning_rate: 1.0e-04
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "jpg"
    cond_stage_key: "txt"
    image_size: 64
    channels: 4
    cond_stage_trainable: false # Note: different from the one we trained before
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_factor: 0.18215
    use_ema: False

    scheduler_config: # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 10000 ]
        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32 # unused
        in_channels: 4
        out_channels: 4
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: True
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
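This is the standard Stable Diffusion v1 inference config from the CompVis latent-diffusion codebase: each target key names the class to build and params its constructor arguments. A minimal sketch of how such a config is typically consumed, following the loading pattern used by the CompVis stable-diffusion scripts (e.g. scripts/txt2img.py); it assumes the ldm package and omegaconf are installed, and the checkpoint path is a placeholder.

import torch
from omegaconf import OmegaConf
from ldm.util import instantiate_from_config

# Parse the YAML and build the LatentDiffusion model named by `target`
config = OmegaConf.load("v1-inference.yaml")
model = instantiate_from_config(config.model)

# Load trained weights into the freshly constructed model
ckpt = torch.load("model.ckpt", map_location="cpu")  # placeholder path
state_dict = ckpt.get("state_dict", ckpt)
model.load_state_dict(state_dict, strict=False)
model.eval()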