Upload 3 files
Browse files
test-simpletuner/Training/tr_01/s01_config_01.json
ADDED
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"--model_type": "lora",
|
3 |
+
"--lora_type": "standard",
|
4 |
+
"--pretrained_model_name_or_path": "black-forest-labs/FLUX.1-dev",
|
5 |
+
"--model_family": "flux",
|
6 |
+
"--data_backend_config": "/workspace/s01_multidatabackend.json",
|
7 |
+
"--output_dir": "/workspace/output/test-simpletuner/tr_01/",
|
8 |
+
"--user_prompt_library": "/workspace/s01_prompt_library.json",
|
9 |
+
"--hub_model_id": "simpletuner-lora-test-simpletuner_tr_01",
|
10 |
+
"--tracker_project_name": "simpletuner-lora-test-simpletuner_tr_01",
|
11 |
+
"--tracker_run_name": "tr_01",
|
12 |
+
"--seed": 151851,
|
13 |
+
"--lora_rank": 4,
|
14 |
+
"--lora_alpha": 4,
|
15 |
+
"--mixed_precision": "bf16",
|
16 |
+
"--optimizer": "adamw_bf16",
|
17 |
+
"--learning_rate": "2e-5",
|
18 |
+
"--train_batch_size": 2,
|
19 |
+
"--gradient_accumulation_steps": 2,
|
20 |
+
"--lr_scheduler": "cosine",
|
21 |
+
"--lr_warmup_steps": 100,
|
22 |
+
"--max_train_steps": 30000,
|
23 |
+
"--num_train_epochs": 0,
|
24 |
+
"--checkpointing_steps": 100,
|
25 |
+
"--base_model_precision": "fp8-quanto",
|
26 |
+
"--base_model_default_dtype": "bf16",
|
27 |
+
"--keep_vae_loaded": true,
|
28 |
+
"--flux_lora_target": "all+ffs",
|
29 |
+
"--gradient_precision": "fp32",
|
30 |
+
"--noise_offset": "0.125",
|
31 |
+
"--noise_offset_probability": "0.5",
|
32 |
+
"--checkpoints_total_limit": 20,
|
33 |
+
"--aspect_bucket_rounding": 4,
|
34 |
+
"--minimum_image_size": 0,
|
35 |
+
"--resume_from_checkpoint": "latest",
|
36 |
+
"--push_to_hub": "false",
|
37 |
+
"--push_checkpoints_to_hub": "false",
|
38 |
+
"--report_to": "wandb",
|
39 |
+
"--metadata_update_interval": 60,
|
40 |
+
"--gradient_checkpointing": "true",
|
41 |
+
"--caption_dropout_probability": "0.15",
|
42 |
+
"--resolution_type": "pixel_area",
|
43 |
+
"--resolution": 768,
|
44 |
+
"--validation_seed": 10,
|
45 |
+
"--validation_steps": 100,
|
46 |
+
"--validation_resolution": "512x768",
|
47 |
+
"--validation_guidance": "3.5",
|
48 |
+
"--validation_guidance_rescale": "0.0",
|
49 |
+
"--validation_num_inference_steps": "20",
|
50 |
+
"--validation_prompt": "yellow color",
|
51 |
+
"--num_validation_images": 1,
|
52 |
+
"--validation_torch_compile": "false",
|
53 |
+
"--disable_benchmark": "false",
|
54 |
+
"--snr_gamma": 5,
|
55 |
+
"--inference_scheduler_timestep_spacing": "trailing",
|
56 |
+
"--training_scheduler_timestep_spacing": "trailing",
|
57 |
+
"--max_workers": 32,
|
58 |
+
"--read_batch_size": 25,
|
59 |
+
"--write_batch_size": 64,
|
60 |
+
"--torch_num_threads": 8,
|
61 |
+
"--image_processing_batch_size": 32,
|
62 |
+
"--vae_batch_size": 4,
|
63 |
+
"--compress_disk_cache": true,
|
64 |
+
"--max_grad_norm": "0.04",
|
65 |
+
"--disable_bucket_pruning": false,
|
66 |
+
"--delete_problematic_images": true
|
67 |
+
}
|
test-simpletuner/Training/tr_01/s01_multidatabackend.json
ADDED
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
[
|
2 |
+
{
|
3 |
+
"id": "all_dataset_768",
|
4 |
+
"type": "local",
|
5 |
+
"instance_data_dir": "/workspace/input/dataset",
|
6 |
+
"crop": false,
|
7 |
+
"crop_style": "random",
|
8 |
+
"crop_aspect": "preserve",
|
9 |
+
"resolution": 768,
|
10 |
+
"resolution_type": "pixel_area",
|
11 |
+
"minimum_image_size": 150,
|
12 |
+
"maximum_image_size": 1024,
|
13 |
+
"target_downsample_size": 768,
|
14 |
+
"prepend_instance_prompt": false,
|
15 |
+
"instance_prompt": null,
|
16 |
+
"only_instance_prompt": false,
|
17 |
+
"caption_strategy": "textfile",
|
18 |
+
"cache_dir_vae": "/workspace/cache/cache_vae_768/",
|
19 |
+
"vae_cache_clear_each_epoch": false,
|
20 |
+
"probability": 1.0,
|
21 |
+
"repeats": 3,
|
22 |
+
"disabled": false,
|
23 |
+
"text_embeds": "alt-embed-cache",
|
24 |
+
"skip_file_discovery": "",
|
25 |
+
"preserve_data_backend_cache": false,
|
26 |
+
"metadata_backend": "json",
|
27 |
+
"cache_file_suffix": "_768"
|
28 |
+
},
|
29 |
+
{
|
30 |
+
"id": "all_dataset_512",
|
31 |
+
"type": "local",
|
32 |
+
"instance_data_dir": "/workspace/input/dataset",
|
33 |
+
"crop": false,
|
34 |
+
"crop_style": "random",
|
35 |
+
"crop_aspect": "preserve",
|
36 |
+
"resolution": 512,
|
37 |
+
"resolution_type": "pixel_area",
|
38 |
+
"minimum_image_size": 115,
|
39 |
+
"maximum_image_size": 832,
|
40 |
+
"target_downsample_size": 512,
|
41 |
+
"prepend_instance_prompt": false,
|
42 |
+
"instance_prompt": null,
|
43 |
+
"only_instance_prompt": false,
|
44 |
+
"caption_strategy": "textfile",
|
45 |
+
"cache_dir_vae": "/workspace/cache/cache_vae_512/",
|
46 |
+
"vae_cache_clear_each_epoch": false,
|
47 |
+
"probability": 1.0,
|
48 |
+
"repeats": 3,
|
49 |
+
"disabled": false,
|
50 |
+
"text_embeds": "alt-embed-cache",
|
51 |
+
"skip_file_discovery": "",
|
52 |
+
"preserve_data_backend_cache": false,
|
53 |
+
"metadata_backend": "json",
|
54 |
+
"cache_file_suffix": "_512"
|
55 |
+
},
|
56 |
+
{
|
57 |
+
"id": "alt-embed-cache",
|
58 |
+
"type": "local",
|
59 |
+
"dataset_type": "text_embeds",
|
60 |
+
"default": true,
|
61 |
+
"cache_dir": "/workspace/cache/text_embeds/",
|
62 |
+
"disabled": false,
|
63 |
+
"write_batch_size": 128
|
64 |
+
}
|
65 |
+
]
|
test-simpletuner/Training/tr_01/s01_prompt_library.json
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"black": "black color",
|
3 |
+
"green": "green color",
|
4 |
+
"blue": "blue color",
|
5 |
+
"red": "red color"
|
6 |
+
}
|