Delta-Vector committed on
Commit 0810124 · verified · 1 parent: e5e53f5

Update Nemo.yml

Files changed (1)
Nemo.yml +91 -94
Nemo.yml CHANGED
@@ -1,95 +1,92 @@
- base_model: Dans-DiscountModels/Mistral-NeMo-Minitron-8B-Base-ChatML
- model_type: AutoModelForCausalLM
- tokenizer_type: AutoTokenizer
-
- plugins:
-   - axolotl.integrations.liger.LigerPlugin
- liger_rope: true
- liger_rms_norm: true
- #liger_swiglu: true
- #liger_cross_entropy: true
- #liger_fused_linear_cross_entropy: true
-
- load_in_8bit: false
- load_in_4bit: false
- strict: false
-
- datasets:
-   - path: anthracite-core/c2_logs_32k_llama3_qwen2_v1.2
-     type: sharegpt
-     conversation: chatml
-   - path: anthracite-org/kalo-opus-instruct-22k-no-refusal
-     type: sharegpt
-     conversation: chatml
-   - path: lodrick-the-lafted/kalo-opus-instruct-3k-filtered
-     type: sharegpt
-     conversation: chatml
-   - path: anthracite-org/nopm_claude_writing_fixed
-     type: sharegpt
-     conversation: chatml
-   - path: anthracite-org/kalo_opus_misc_240827
-     type: sharegpt
-     conversation: chatml
-   - path: anthracite-org/kalo_misc_part2
-     type: sharegpt
-     conversation: chatml
-   - path: NewEden/Claude-Instruct-5k
-     type: sharegpt
-     conversation: chatml
- #chat_template: chatml
- shuffle_merged_datasets: false
- default_system_message: "You are a helpful assistant that responds to the user."
- dataset_prepared_path: /workspace/data/8b-nemo-fft-data
- val_set_size: 0.0
- output_dir: /workspace/data/8b-nemo-fft-out
-
- sequence_len: 16384
- sample_packing: true
- eval_sample_packing: false
- pad_to_sequence_len: true
-
- adapter:
- lora_model_dir:
- lora_r:
- lora_alpha:
- lora_dropout:
- lora_target_linear:
- lora_fan_in_fan_out:
-
- wandb_project: 8b-nemoprune-fft
- wandb_entity:
- wandb_watch:
- wandb_name: attempt-01
- wandb_log_model:
-
- gradient_accumulation_steps: 4
- micro_batch_size: 1
- num_epochs: 2
- optimizer: adamw_bnb_8bit
- lr_scheduler: cosine
- learning_rate: 0.000008
-
- train_on_inputs: false
- group_by_length: false
- bf16: auto
- fp16:
- tf32: false
-
- gradient_checkpointing: true
- early_stopping_patience:
- auto_resume_from_checkpoints: true
- local_rank:
- logging_steps: 1
- xformers_attention:
- flash_attention: true
-
- warmup_steps: 10
- evals_per_epoch:
- eval_table_size:
- eval_max_new_tokens:
- saves_per_epoch: 2
- debug:
- deepspeed: deepspeed_configs/zero3_bf16.json
- weight_decay: 0.001
- fsdp:
+ base_model: Dans-DiscountModels/Mistral-NeMo-Minitron-8B-Base-ChatML
+ model_type: AutoModelForCausalLM
+ tokenizer_type: AutoTokenizer
+
+ plugins:
+   - axolotl.integrations.liger.LigerPlugin
+ liger_rope: true
+ liger_rms_norm: true
+ #liger_swiglu: true
+ #liger_cross_entropy: true
+ #liger_fused_linear_cross_entropy: true
+
+ load_in_8bit: false
+ load_in_4bit: false
+ strict: false
+
+ datasets:
+   - path: anthracite-core/c2_logs_32k_llama3_qwen2_v1.2
+     type: sharegpt
+     conversation: chatml
+   - path: anthracite-org/kalo-opus-instruct-22k-no-refusal
+     type: sharegpt
+     conversation: chatml
+   - path: lodrick-the-lafted/kalo-opus-instruct-3k-filtered
+     type: sharegpt
+     conversation: chatml
+   - path: anthracite-org/nopm_claude_writing_fixed
+     type: sharegpt
+     conversation: chatml
+   - path: anthracite-org/kalo_opus_misc_240827
+     type: sharegpt
+     conversation: chatml
+   - path: anthracite-org/kalo_misc_part2
+     type: sharegpt
+     conversation: chatml
+ #chat_template: chatml
+ shuffle_merged_datasets: false
+ default_system_message: "You are a helpful assistant that responds to the user."
+ dataset_prepared_path: /workspace/data/8b-nemo-fft-data
+ val_set_size: 0.0
+ output_dir: /workspace/data/8b-nemo-fft-out
+
+ sequence_len: 16384
+ sample_packing: true
+ eval_sample_packing: false
+ pad_to_sequence_len: true
+
+ adapter:
+ lora_model_dir:
+ lora_r:
+ lora_alpha:
+ lora_dropout:
+ lora_target_linear:
+ lora_fan_in_fan_out:
+
+ wandb_project: 8b-nemoprune-fft
+ wandb_entity:
+ wandb_watch:
+ wandb_name: attempt-01
+ wandb_log_model:
+
+ gradient_accumulation_steps: 2
+ micro_batch_size: 2
+ num_epochs: 4
+ optimizer: adamw_bnb_8bit
+ lr_scheduler: cosine
+ learning_rate: 0.00001
+
+ train_on_inputs: false
+ group_by_length: false
+ bf16: auto
+ fp16:
+ tf32: false
+
+ gradient_checkpointing: true
+ early_stopping_patience:
+ auto_resume_from_checkpoints: true
+ local_rank:
+ logging_steps: 1
+ xformers_attention:
+ flash_attention: true
+
+ warmup_steps: 10
+ evals_per_epoch:
+ eval_table_size:
+ eval_max_new_tokens:
+ saves_per_epoch: 2
+ debug:
+ deepspeed: deepspeed_configs/zero3_bf16.json
+ weight_decay: 0.001
+ fsdp:
  fsdp_config:
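
Net effect of the hyperparameter changes: the per-device effective batch size is unchanged (1 x 4 = 2 x 2 = 4 samples per optimizer step), while the peak learning rate rises from 8e-6 to 1e-5, training runs 4 epochs instead of 2, and the NewEden/Claude-Instruct-5k dataset is dropped. A minimal sketch of the batch arithmetic in plain Python (the world_size parameter is an assumption for illustration; GPU count is not recorded in the config):

def effective_batch(micro_batch_size: int, grad_accum_steps: int, world_size: int = 1) -> int:
    # Samples that contribute to a single optimizer step.
    return micro_batch_size * grad_accum_steps * world_size

old = effective_batch(micro_batch_size=1, grad_accum_steps=4)  # old config: 4 per device
new = effective_batch(micro_batch_size=2, grad_accum_steps=2)  # new config: 4 per device
assert old == new  # the commit trades accumulation steps for larger micro-batches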
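The schedule shape itself is untouched (lr_scheduler: cosine with warmup_steps: 10), so only the peak moves. A rough sketch of the resulting curve, assuming linear warmup followed by cosine decay toward zero, which is the usual shape of such a scheduler (the exact implementation Axolotl delegates to may differ in details):

import math

def lr_at(step: int, total_steps: int, peak_lr: float = 1e-5, warmup_steps: int = 10) -> float:
    # Linear warmup to peak_lr, then cosine decay over the remaining steps.
    if step < warmup_steps:
        return peak_lr * step / warmup_steps
    progress = (step - warmup_steps) / max(1, total_steps - warmup_steps)
    return peak_lr * 0.5 * (1.0 + math.cos(math.pi * progress))

Under the old config the same curve peaked at 8e-6 instead of 1e-5.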