---
adam_beta1: 0.9
adam_beta2: 0.999
adam_epsilon: 1.0e-08
adam_weight_decay: 0.01
allow_tf32: false
center_crop: false
checkpointing_steps: 100
checkpoints_total_limit: null
class_data_dir: null
class_labels_conditioning: null
class_prompt: null
dataloader_num_workers: 0
enable_xformers_memory_efficient_attention: false
gradient_accumulation_steps: 1
gradient_checkpointing: false
hub_model_id: null
hub_token: null
instance_data_dir: /content/full_caption_rmtxt_dataset
instance_prompt: "\uBC30\uBD88\uB7EC"
learning_rate: 1.0e-06
local_rank: 0
logging_dir: logs
lr_num_cycles: 1
lr_power: 1.0
lr_scheduler: constant
lr_warmup_steps: 0
max_grad_norm: 1.0
max_train_steps: 1000
mixed_precision: 'no'
num_class_images: 100
num_train_epochs: 1
num_validation_images: 4
output_dir: LoRA_Any_0to1000
pre_compute_text_embeddings: false
pretrained_model_name_or_path: kyujinpy/KO-anything-v4-5
prior_generation_precision: null
prior_loss_weight: 1.0
push_to_hub: true
rank: 4
report_to: tensorboard
resolution: 768
resume_from_checkpoint: null
revision: null
sample_batch_size: 4
scale_lr: false
seed: 0
text_encoder_use_attention_mask: false
tokenizer_max_length: null
tokenizer_name: null
train_batch_size: 1
train_text_encoder: true
use_8bit_adam: false
validation_epochs: 50
validation_prompt: "\uBC30\uBD88\uB7EC"
variant: null