adam_beta1: 0.9
adam_beta2: 0.999
adam_epsilon: 1.0e-08
adam_weight_decay: 0.01
allow_tf32: false
cache_dir: null
center_crop: true
checkpointing_steps: 5000
checkpoints_total_limit: null
dataloader_num_workers: 0
dataset_config_name: null
enable_xformers_memory_efficient_attention: false
gradient_accumulation_steps: 4
gradient_checkpointing: true
lambda_kd_feat: 1.0
lambda_kd_output: 1.0
lambda_sd: 1.0
learning_rate: 5.0e-05
local_rank: -1
logging_dir: logs
lr_scheduler: constant
lr_warmup_steps: 0
max_grad_norm: 1.0
max_train_samples: null
max_train_steps: 0
mixed_precision: fp16
non_ema_revision: null
num_train_epochs: 0
num_valid_images: 2
output_dir: ./results/kd_bk_tiny
pretrained_model_name_or_path: CompVis/stable-diffusion-v1-4
random_flip: true
report_to: all
resolution: 512
resume_from_checkpoint: null
revision: null
scale_lr: false
seed: 1234
train_batch_size: 64
train_data_dir: ./data/laion_aes/preprocessed_11k
unet_config_name: bk_tiny
unet_config_path: ./src/unet_config
use_8bit_adam: false
use_copy_weight_from_teacher: true
use_ema: true
valid_prompt: a golden vase with different flowers
valid_steps: 500
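
The three `lambda_*` entries weight the terms that make up the total training loss: the standard denoising (task) loss, an output-level distillation term against the teacher U-Net, and a feature-level distillation term over intermediate block activations. The snippet below is a minimal sketch of how such a composite loss is typically assembled; it is not the repository's actual training loop, and the tensor names (`noise_pred_student`, `teacher_feats`, etc.) and the MSE-based KD terms are illustrative assumptions.

```python
import torch
import torch.nn.functional as F

# Hypothetical placeholder tensors for one training step; in a real run these
# come from the teacher and student U-Nets, and shapes depend on the model.
noise_target = torch.randn(4, 4, 64, 64)        # diffusion denoising target
noise_pred_student = torch.randn(4, 4, 64, 64)  # student U-Net prediction
noise_pred_teacher = torch.randn(4, 4, 64, 64)  # teacher U-Net prediction
student_feats = [torch.randn(4, 320, 32, 32)]   # intermediate student activations
teacher_feats = [torch.randn(4, 320, 32, 32)]   # matching teacher activations

# Loss weights as set in the config above.
lambda_sd, lambda_kd_output, lambda_kd_feat = 1.0, 1.0, 1.0

# Task loss: student prediction vs. the diffusion target.
loss_sd = F.mse_loss(noise_pred_student, noise_target)

# Output-level KD: student prediction vs. teacher prediction.
loss_kd_output = F.mse_loss(noise_pred_student, noise_pred_teacher)

# Feature-level KD: match intermediate activations block by block.
loss_kd_feat = sum(F.mse_loss(s, t) for s, t in zip(student_feats, teacher_feats))

# Weighted sum used for the backward pass.
loss = (lambda_sd * loss_sd
        + lambda_kd_output * loss_kd_output
        + lambda_kd_feat * loss_kd_feat)
```

With all three weights at 1.0, as in this config, the task loss and the two distillation terms contribute equally; raising or lowering an individual `lambda_*` shifts the balance toward or away from that term.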