Commit e5e53f5 (verified) by Delta-Vector · 1 Parent(s): 0726b86

Upload Nemo.yml

Files changed (1): Nemo.yml (+95 -0)
Nemo.yml ADDED
@@ -0,0 +1,95 @@
+ base_model: Dans-DiscountModels/Mistral-NeMo-Minitron-8B-Base-ChatML
+ model_type: AutoModelForCausalLM
+ tokenizer_type: AutoTokenizer
+
+ plugins:
+   - axolotl.integrations.liger.LigerPlugin
+ liger_rope: true
+ liger_rms_norm: true
+ #liger_swiglu: true
+ #liger_cross_entropy: true
+ #liger_fused_linear_cross_entropy: true
+
+ load_in_8bit: false
+ load_in_4bit: false
+ strict: false
+
+ datasets:
+   - path: anthracite-core/c2_logs_32k_llama3_qwen2_v1.2
+     type: sharegpt
+     conversation: chatml
+   - path: anthracite-org/kalo-opus-instruct-22k-no-refusal
+     type: sharegpt
+     conversation: chatml
+   - path: lodrick-the-lafted/kalo-opus-instruct-3k-filtered
+     type: sharegpt
+     conversation: chatml
+   - path: anthracite-org/nopm_claude_writing_fixed
+     type: sharegpt
+     conversation: chatml
+   - path: anthracite-org/kalo_opus_misc_240827
+     type: sharegpt
+     conversation: chatml
+   - path: anthracite-org/kalo_misc_part2
+     type: sharegpt
+     conversation: chatml
+   - path: NewEden/Claude-Instruct-5k
+     type: sharegpt
+     conversation: chatml
+ #chat_template: chatml
+ shuffle_merged_datasets: false
+ default_system_message: "You are a helpful assistant that responds to the user."
+ dataset_prepared_path: /workspace/data/8b-nemo-fft-data
+ val_set_size: 0.0
+ output_dir: /workspace/data/8b-nemo-fft-out
+
+ sequence_len: 16384
+ sample_packing: true
+ eval_sample_packing: false
+ pad_to_sequence_len: true
+
+ adapter:
+ lora_model_dir:
+ lora_r:
+ lora_alpha:
+ lora_dropout:
+ lora_target_linear:
+ lora_fan_in_fan_out:
+
+ wandb_project: 8b-nemoprune-fft
+ wandb_entity:
+ wandb_watch:
+ wandb_name: attempt-01
+ wandb_log_model:
+
+ gradient_accumulation_steps: 4
+ micro_batch_size: 1
+ num_epochs: 2
+ optimizer: adamw_bnb_8bit
+ lr_scheduler: cosine
+ learning_rate: 0.000008
+
+ train_on_inputs: false
+ group_by_length: false
+ bf16: auto
+ fp16:
+ tf32: false
+
+ gradient_checkpointing: true
+ early_stopping_patience:
+ auto_resume_from_checkpoints: true
+ local_rank:
+ logging_steps: 1
+ xformers_attention:
+ flash_attention: true
+
+ warmup_steps: 10
+ evals_per_epoch:
+ eval_table_size:
+ eval_max_new_tokens:
+ saves_per_epoch: 2
+ debug:
+ deepspeed: deepspeed_configs/zero3_bf16.json
+ weight_decay: 0.001
+ fsdp:
+ fsdp_config:
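
For context, this is a full-parameter fine-tuning config meant to be consumed by Axolotl's trainer (commonly launched as "accelerate launch -m axolotl.cli.train Nemo.yml" for this generation of Axolotl), running under the DeepSpeed ZeRO-3 bf16 config referenced above. The minimal Python sketch below is not part of the commit; it only loads the YAML with PyYAML and derives the effective global batch size from micro_batch_size and gradient_accumulation_steps. The GPU count is an assumption, since the config itself does not record the world size of the run.

    # Minimal sketch (not part of the commit): inspect Nemo.yml and compute the
    # effective global batch size. NUM_GPUS is an assumption; the config does
    # not specify how many GPUs the run used.
    import yaml

    with open("Nemo.yml") as f:
        cfg = yaml.safe_load(f)

    NUM_GPUS = 8  # assumed; replace with the actual number of training GPUs
    global_batch = cfg["micro_batch_size"] * cfg["gradient_accumulation_steps"] * NUM_GPUS

    print("base model:", cfg["base_model"])
    print("datasets:", len(cfg["datasets"]))
    print("sequence length:", cfg["sequence_len"])
    print("global batch size:", global_batch, "sequences per optimizer step")

With the values committed here (micro_batch_size 1, gradient_accumulation_steps 4), the global batch size is 4 sequences per optimizer step multiplied by however many GPUs the run used.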