feat: upload lora model
Files changed:
- _config/config_file.toml  (+5 -5)
- _config/sample_prompt.toml  (+1 -1)
_config/config_file.toml
CHANGED
@@ -7,7 +7,7 @@ shuffle_caption = true
 lowram = true
 
 [model_arguments]
-pretrained_model_name_or_path = "Laxhar/noobai-XL-1.
+pretrained_model_name_or_path = "Laxhar/noobai-XL-1.1"
 vae = "/content/vae/sdxl_vae.safetensors"
 
 [dataset_arguments]
@@ -15,15 +15,15 @@ debug_dataset = false
 in_json = "/content/LoRA/meta_lat.json"
 train_data_dir = "/content/LoRA/train_data"
 dataset_repeats = 1
-keep_tokens =
+keep_tokens = 1
 resolution = "1024,1024"
 color_aug = false
 token_warmup_min = 1
 token_warmup_step = 0
 
 [training_arguments]
-output_dir = "/content/LoRA/output/
-output_name = "
+output_dir = "/content/LoRA/output/bamakipajamas"
+output_name = "bamakipajamas"
 save_precision = "fp16"
 save_every_n_epochs = 2
 train_batch_size = 4
@@ -41,7 +41,7 @@ mixed_precision = "fp16"
 [logging_arguments]
 log_with = "tensorboard"
 logging_dir = "/content/LoRA/logs"
-log_prefix = "
+log_prefix = "bamakipajamas"
 
 [sample_prompt_arguments]
 sample_every_n_epochs = 1
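The sections above ([model_arguments], [dataset_arguments], [training_arguments], [logging_arguments]) look like a kohya-ss sd-scripts style SDXL LoRA training config. As a quick sanity check before launching a run, the committed TOML can be loaded and the fields this commit fills in can be echoed. The sketch below is not part of the commit; it assumes Python 3.11+ (stdlib tomllib) and that it is run from the repository root.

# Minimal sketch (not part of this commit): load the committed config and echo
# the fields filled in here, so an empty or truncated value is caught before a run.
# Assumes Python 3.11+ (stdlib tomllib) and the repository root as working directory.
import tomllib
from pathlib import Path

with Path("_config/config_file.toml").open("rb") as f:
    cfg = tomllib.load(f)

model = cfg["model_arguments"]
data = cfg["dataset_arguments"]
train = cfg["training_arguments"]
log = cfg["logging_arguments"]

print("base model :", model["pretrained_model_name_or_path"])  # Laxhar/noobai-XL-1.1
print("keep_tokens:", data["keep_tokens"])                      # 1
print("output_dir :", train["output_dir"])                      # /content/LoRA/output/bamakipajamas
print("output_name:", train["output_name"])                     # bamakipajamas
print("log_prefix :", log["log_prefix"])                        # bamakipajamas

# The point of this commit is that these fields were previously blank or truncated,
# so fail fast if any of them is still empty.
for value in (train["output_dir"], train["output_name"], log["log_prefix"]):
    assert value.strip(), "training output/logging fields must not be empty"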
_config/sample_prompt.toml
CHANGED
@@ -5,5 +5,5 @@ height = 1024
 scale = 12
 sample_steps = 28
 [[prompt.subset]]
-prompt = "masterpiece,best quality,1girl"
+prompt = "masterpiece,best quality,bamakipajamas,1girl"
 
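The sample prompt now carries the same trigger word used for output_name, so preview generations during training exercise the trained token. A companion sketch (again an assumption, not part of the commit) reads the committed file and checks this; the [[prompt.subset]] header implies prompt.subset parses as an array of tables regardless of what sits above the hunk shown.

# Minimal sketch (not part of this commit): confirm the sample prompt includes
# the LoRA trigger word added above. Assumes Python 3.11+ and repo-root cwd.
import tomllib

with open("_config/sample_prompt.toml", "rb") as f:
    sample = tomllib.load(f)

first_prompt = sample["prompt"]["subset"][0]["prompt"]
assert "bamakipajamas" in first_prompt, "trigger word missing from sample prompt"
print(first_prompt)  # masterpiece,best quality,bamakipajamas,1girl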