Delta-Vector committed
Commit 9eb091a · verified · 1 Parent(s): aa4fcf7

Upload Untitled-1.yaml

Files changed (1)
  1. Untitled-1.yaml +140 -0
Untitled-1.yaml ADDED
@@ -0,0 +1,140 @@
+ base_model: allura-org/TQ2.5-14B-Sugarquill-v1
+ strict: false
+
+ plugins:
+   - axolotl.integrations.liger.LigerPlugin
+ liger_rope: true
+ liger_rms_norm: true
+ liger_swiglu: true
+ liger_fused_linear_cross_entropy: true
+
+ # Output and HuggingFace
+ hub_model_id: NewEden/control-14b-lora
+ hf_use_auth_token: true
+ hub_strategy: "all_checkpoints"
+
+ wandb_project: huggingface
+ wandb_entity:
+ wandb_name: Control-14B
+
+
+ chat_template: chatml
+ group_by_length: false
+ datasets:
+   - path: Nitral-AI/Creative_Writing-ShareGPT
+     type: chat_template
+     roles_to_train: ["gpt"]
+     field_messages: conversations
+     message_field_role: from
+     message_field_content: value
+     train_on_eos: turn
+   - path: Nitral-AI/ARES-ShareGPT
+     type: chat_template
+     chat_template: chatml
+     roles_to_train: ["gpt"]
+     field_messages: conversations
+     message_field_role: from
+     message_field_content: value
+     train_on_eos: turn
+   - path: NewEden/Claude-Instruct-5K
+     type: chat_template
+     chat_template: chatml
+     roles_to_train: ["gpt"]
+     field_messages: conversations
+     message_field_role: from
+     message_field_content: value
+     train_on_eos: turn
+   - path: NewEden/OpenCAI-ShareGPT
+     type: chat_template
+     roles_to_train: ["gpt"]
+     field_messages: conversations
+     message_field_role: from
+     message_field_content: value
+     train_on_eos: turn
+   - path: NewEden/PIPPA-Mega-Filtered
+     type: chat_template
+     chat_template: chatml
+     roles_to_train: ["gpt"]
+     field_messages: conversations
+     message_field_role: from
+     message_field_content: value
+     train_on_eos: turn
+   - path: NewEden/Roleplay-Logs-Sharegpt-Ngram-cleaned
+     type: chat_template
+     chat_template: chatml
+     roles_to_train: ["gpt"]
+     field_messages: conversations
+     message_field_role: from
+     message_field_content: value
+     train_on_eos: turn
+   - path: Nitral-AI/Creative_Writing-ShareGPT
+     type: chat_template
+     chat_template: chatml
+     roles_to_train: ["gpt"]
+     field_messages: conversations
+     message_field_role: from
+     message_field_content: value
+     train_on_eos: turn
+
+ val_set_size: 0.01
+ evals_per_epoch: 4
+ eval_table_size:
+ eval_max_new_tokens: 128
+
+ sequence_len: 8192
+ save_safetensors: true
+ saves_per_epoch: 2
+ logging_steps: 1
+ special_tokens:
+
+ # Quantization
+ bf16: auto
+ fp16:
+ tf32: false
+ ## For LoRA
+ load_in_8bit: false
+ load_in_4bit: true
+
+ # LoRA
+ peft_use_rslora: true
+ adapter: qlora
+ lora_model_dir:
+ lora_r: 128
+ lora_alpha: 16
+ lora_dropout: 0.05
+ lora_target_linear: true
+ lora_fan_in_fan_out:
+ lora_target_modules:
+
+ ## if oom
+ # lora_r: 64
+ # lora_alpha: 32
+ # lora_dropout: 0.1
+
+
+ weight_decay: 0.02
+ max_grad_norm: 1.0
+
+ warmup_ratio: 0.05
+ learning_rate: 0.00002
+ lr_scheduler: cosine
+ #lr_scheduler_kwargs:
+ optimizer: paged_adamw_8bit # usually adamw_torch or paged_adamw_8bit
+
+ ## Batch Size
+ gradient_accumulation_steps: 8
+ micro_batch_size: 1
+ eval_batch_size: 1
+
+ # Optimizations
+ pad_to_sequence_len: true
+ sample_packing: true
+ eval_sample_packing: false
+ flash_attention: true
+ xformers_attention:
+ gradient_checkpointing: "unsloth"
+ gradient_checkpointing_kwargs:
+   use_reentrant: true
+ local_rank:
+ early_stopping_patience:
+ debug:
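
For orientation: with these settings, each optimizer step covers micro_batch_size (1) x gradient_accumulation_steps (8) = 8 packed sequences of up to sequence_len (8192) tokens per GPU, roughly 65k tokens per step. The "## if oom" comments in the config sketch a lower-memory fallback; applied, the LoRA block would read as below. This is only a sketch built from the commented values, with everything else left unchanged:

    adapter: qlora
    peft_use_rslora: true
    lora_r: 64         # halved from 128 to cut adapter memory
    lora_alpha: 32     # raised from 16, per the "## if oom" comment
    lora_dropout: 0.1  # raised from 0.05
    lora_target_linear: true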