Delta-Vector committed
Commit 43264b3 · verified · 1 parent: f20d2ad

Update sdprompter2.yaml

Files changed (1)
  1. sdprompter2.yaml +101 -99
sdprompter2.yaml CHANGED
@@ -1,99 +1,101 @@
 base_model: Delta-Vector/Holland-4B
 model_type: AutoModelForCausalLM
 tokenizer_type: AutoTokenizer

 trust_remote_code: true

 load_in_8bit: false
 load_in_4bit: false
 strict: false

 datasets:
   - path: NewEden/CivitAI-SD-Prompts
     # type:
     # system_prompt: ""
     # system_format: "<|im_start|>system\n{system}<|im_end|>\n"
     # field_system: instruction
     # field_instruction: input
     # field_input: ""
     # field_output: output
     # no_input_format: "<|im_start|>user\n{instruction}<|im_end|>\n<|im_start|>assistant\n"

     # system_prompt: ""
     # field_instruction: instruction
     # field_input: input
     # field_output: output
     # format: |-
     # <|im_start|>system
     # {instruction}<|im_end|>
     # <|im_start|>user
     # {input}<|im_end|>
     # <|im_start|>assistant
     # {output}

     type: alpaca
     conversation: mpt-30b-instruct
     # field_system: instruction
     # field_instruction: input
     # field_input: input
     # field_output: output
 chat_template: alpaca

 dataset_prepared_path:
 val_set_size: 0.02
 output_dir: ./outputs/out2
 sequence_len: 8192
 sample_packing: true
 eval_sample_packing: true
 pad_to_sequence_len: true

 plugins:
   - axolotl.integrations.liger.LigerPlugin
 liger_rope: true
 liger_rms_norm: true
 liger_swiglu: true
 liger_fused_linear_cross_entropy: true

 adapter:
 lora_model_dir:
 lora_r:
 lora_alpha:
 lora_dropout:
 lora_target_linear: true
 lora_fan_in_fan_out:

 wandb_project: SDprompterV2
 wandb_entity:
 wandb_watch:
 wandb_name: SDprompterV2
 wandb_log_model:

 gradient_accumulation_steps: 32
 micro_batch_size: 1
 num_epochs: 2
 optimizer: adamw_torch
 lr_scheduler: cosine
 learning_rate: 0.00002

 train_on_inputs: false
 group_by_length: false
 bf16: auto
 fp16:
 tf32: true

 gradient_checkpointing: true
 gradient_checkpointing_kwargs:
   use_reentrant: false
 early_stopping_patience:
 resume_from_checkpoint:
 local_rank:
 logging_steps: 1
 xformers_attention:
 flash_attention: true

 warmup_ratio: 0.05
 evals_per_epoch: 4
 saves_per_epoch: 1
 debug:
-weight_decay: 0.0
+weight_decay: 0.05
+deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json
 special_tokens:
+  pad_token: <|finetune_right_pad_id|>
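
The substantive changes are the weight decay bump (0.0 to 0.05), the new DeepSpeed ZeRO-2 config, and the explicit pad token. A minimal pre-flight sketch follows, assuming `<|finetune_right_pad_id|>` (a Llama-3.1-style right-padding token) already exists in the Holland-4B tokenizer's vocabulary; if the base tokenizer lacks it, axolotl would instead have to add it as a new special token and resize the embeddings. The world_size value is an assumption, not something this commit specifies.

# Pre-flight sketch for this config (assumptions flagged inline).
from transformers import AutoTokenizer

# Same base model and trust_remote_code setting as the YAML above.
tokenizer = AutoTokenizer.from_pretrained(
    "Delta-Vector/Holland-4B", trust_remote_code=True
)

# The commit pins pad_token to <|finetune_right_pad_id|>; this only works
# cleanly if the token is already in the vocabulary. Fast tokenizers return
# the unk id (or None, when no unk token is defined) for unknown strings,
# so check for both.
pad_id = tokenizer.convert_tokens_to_ids("<|finetune_right_pad_id|>")
if pad_id is None or pad_id == tokenizer.unk_token_id:
    raise ValueError("pad token missing from vocab; axolotl would add it "
                     "as a new special token and resize embeddings")
print("pad token id:", pad_id)

# Effective batch size per optimizer step, from the YAML:
# micro_batch_size * gradient_accumulation_steps * world_size.
world_size = 1  # assumption: set to your GPU count (ZeRO-2 suggests >1)
print("effective batch size:", 1 * 32 * world_size)

With micro_batch_size 1 and gradient_accumulation_steps 32, each GPU contributes 32 samples per optimizer step, so the global effective batch size scales linearly with however many GPUs the ZeRO-2 launch uses.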