nouamanetazi (HF staff) committed
Commit 9fb6baf · verified · Parent(s): 4fcb49a

Add files using upload-large-folder tool
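The message refers to the `upload_large_folder` utility in `huggingface_hub`, which splits a big local folder into resumable, chunked commits. A minimal sketch of how a commit like this is typically produced — the repo id and local path below are hypothetical placeholders, and the call assumes a recent `huggingface_hub` release that ships `HfApi.upload_large_folder`:

```python
from huggingface_hub import HfApi

api = HfApi()
# Hypothetical repo id and folder path. upload_large_folder performs the
# upload as resumable chunked commits, which is why a single commit can
# touch this many files at once.
api.upload_large_folder(
    repo_id="user/benchmark-configs",  # placeholder, not the actual repo
    repo_type="dataset",
    folder_path="./configs",
)
```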

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.
Files changed (50):
  1. configs/config_1.14G_dp16_tp8_pp1_acc2_mbs16_seq2048_zero1_tpmodeALL_vocab32k.yaml +91 -0
  2. configs/config_1.14G_dp1_tp1_pp8_acc64_mbs1_seq2048_zero0_tpmodeRED_l16_h2048_heads32.yaml +91 -0
  3. configs/config_1.14G_dp2_tp1_pp4_acc2_mbs32_seq32768_zero1_tpmodeRED_vocab32k.yaml +91 -0
  4. configs/config_1.14G_dp2_tp4_pp1_acc128_mbs2_seq8192_zero1_tpmodeALL_vocab32k.yaml +91 -0
  5. configs/config_1.14G_dp2_tp4_pp1_acc4_mbs16_seq32768_zero1_tpmodeALL_vocab32k.yaml +91 -0
  6. configs/config_1.14G_dp2_tp4_pp1_acc4_mbs16_seq8192_zero1_tpmodeALL_vocab32k.yaml +91 -0
  7. configs/config_1.14G_dp2_tp64_pp1_acc2_mbs32_seq32768_zero1_tpmodeALL_vocab32k.yaml +91 -0
  8. configs/config_1.14G_dp2_tp8_pp1_acc256_mbs4_seq2048_zero1_tpmodeRED_vocab32k.yaml +91 -0
  9. configs/config_1.14G_dp32_tp16_pp1_acc1_mbs4_seq8192_zero1_tpmodeRED_vocab32k.yaml +91 -0
  10. configs/config_1.14G_dp32_tp8_pp1_acc1_mbs1_seq32768_zero1_tpmodeALL_vocab32k.yaml +91 -0
  11. configs/config_1.14G_dp4_tp64_pp1_acc64_mbs2_seq8192_zero1_tpmodeRED_vocab32k.yaml +91 -0
  12. configs/config_1.14G_dp4_tp8_pp1_acc2_mbs4_seq32768_zero1_tpmodeRED_vocab32k.yaml +91 -0
  13. configs/config_1.14G_dp8_tp16_pp1_acc1_mbs16_seq32768_zero1_tpmodeRED_vocab32k.yaml +91 -0
  14. configs/config_1.14G_dp8_tp16_pp1_acc4_mbs4_seq8192_zero1_tpmodeALL_vocab32k.yaml +91 -0
  15. configs/config_1.14G_dp8_tp32_pp1_acc4_mbs1_seq32768_zero1_tpmodeALL_vocab32k.yaml +91 -0
  16. configs/config_1.14G_dp8_tp32_pp1_acc8_mbs2_seq8192_zero1_tpmodeRED_vocab32k.yaml +91 -0
  17. configs/config_1.34G_dp128_tp1_pp2_acc16_mbs1_seq2048_zero1_tpmodeRED_vocab131k.yaml +91 -0
  18. configs/config_1.34G_dp128_tp4_pp1_acc1_mbs4_seq2048_zero1_tpmodeALL_vocab131k.yaml +91 -0
  19. configs/config_1.34G_dp16_tp8_pp1_acc4_mbs4_seq4096_zero0_tpmodeRED_vocab131k.yaml +91 -0
  20. configs/config_1.34G_dp1_tp8_pp2_acc16_mbs16_seq4096_zero0_tpmodeRED_vocab131k.yaml +91 -0
  21. configs/config_1.34G_dp1_tp8_pp2_acc8_mbs32_seq4096_zero0_tpmodeRED_vocab131k.yaml +91 -0
  22. configs/config_1.34G_dp2_tp16_pp1_acc2_mbs128_seq2048_zero1_tpmodeRED_vocab131k.yaml +91 -0
  23. configs/config_1.34G_dp2_tp16_pp4_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml +91 -0
  24. configs/config_1.34G_dp2_tp1_pp4_acc4_mbs32_seq4096_zero1_tpmodeRED_vocab131k.yaml +91 -0
  25. configs/config_1.34G_dp2_tp256_pp1_acc32_mbs2_seq8192_zero1_tpmodeALL_vocab131k.yaml +91 -0
  26. configs/config_1.34G_dp32_tp4_pp1_acc1_mbs8_seq4096_zero0_tpmodeRED_vocab131k.yaml +91 -0
  27. configs/config_1.34G_dp32_tp4_pp1_acc32_mbs2_seq2048_zero1_tpmodeRED_vocab131k.yaml +91 -0
  28. configs/config_1.34G_dp4_tp16_pp1_acc2_mbs16_seq32768_zero1_tpmodeALL_vocab131k.yaml +91 -0
  29. configs/config_1.34G_dp4_tp16_pp1_acc64_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml +91 -0
  30. configs/config_1.34G_dp4_tp2_pp2_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml +91 -0
  31. configs/config_1.34G_dp4_tp32_pp1_acc1_mbs8_seq32768_zero1_tpmodeRED_vocab131k.yaml +91 -0
  32. configs/config_1.34G_dp4_tp32_pp1_acc32_mbs4_seq2048_zero1_tpmodeRED_vocab131k.yaml +91 -0
  33. configs/config_1.34G_dp4_tp4_pp1_acc32_mbs16_seq2048_zero1_tpmodeALL_vocab131k.yaml +91 -0
  34. configs/config_1.34G_dp4_tp64_pp1_acc16_mbs2_seq8192_zero1_tpmodeRED_vocab131k.yaml +91 -0
  35. configs/config_1.34G_dp4_tp64_pp1_acc8_mbs16_seq2048_zero1_tpmodeALL_vocab131k.yaml +91 -0
  36. configs/config_1.34G_dp64_tp1_pp2_acc2_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml +91 -0
  37. configs/config_1.34G_dp8_tp2_pp16_acc32_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml +91 -0
  38. configs/config_1.34G_dp8_tp4_pp1_acc1_mbs16_seq32768_zero1_tpmodeALL_vocab131k.yaml +91 -0
  39. configs/config_1.34G_dp8_tp8_pp1_acc2_mbs2_seq32768_zero1_tpmodeRED_vocab131k.yaml +91 -0
  40. configs/config_3.57G_dp1_tp16_pp1_acc1_mbs40_seq4096_zero0_tpmodeALL_vocab131k_cache.yaml +91 -0
  41. configs/config_3.57G_dp1_tp32_pp2_acc4_mbs64_seq4096_zero0_tpmodeRED_vocab131k.yaml +91 -0
  42. configs/config_3.57G_dp1_tp4_pp8_acc32_mbs8_seq4096_zero0_tpmodeRED_vocab131k.yaml +91 -0
  43. configs/config_3.57G_dp1_tp8_pp1_acc1_mbs26_seq4096_zero0_tpmodeRED_vocab131k_cache.yaml +91 -0
  44. configs/config_3.57G_dp1_tp8_pp1_acc1_mbs2_seq4096_zero0_tpmodeRED_vocab131k_cache.yaml +91 -0
  45. configs/config_3.57G_dp2_tp1_pp16_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml +91 -0
  46. configs/config_3.57G_dp2_tp1_pp4_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml +91 -0
  47. configs/config_3.57G_dp2_tp4_pp2_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml +91 -0
  48. configs/config_3.57G_dp4_tp16_pp4_acc4_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml +91 -0
  49. configs/config_3.57G_dp4_tp32_pp2_acc1_mbs64_seq4096_zero1_tpmodeRED_vocab131k.yaml +91 -0
  50. configs/config_3.57G_dp64_tp4_pp1_acc2_mbs32_seq4096_zero0_tpmodeRED_vocab131k.yaml +91 -0
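Each filename above encodes one benchmark point: model size, `dp`/`tp`/`pp` (data-, tensor-, and pipeline-parallel degrees), `acc` (gradient-accumulation steps per replica), `mbs` (micro-batch size), `seq` (sequence length), ZeRO stage, tensor-parallel mode (`ALL` = all-reduce, `RED` = reduce-scatter), and vocabulary size. A small illustrative parser (the regex and helper are mine, not part of the repo) that recovers the global token count per optimizer step, dp × acc × mbs × seq:

```python
import re

# Pattern for the launch parameters encoded in each config filename.
NAME = re.compile(
    r"config_(?P<size>[\d.]+G)_dp(?P<dp>\d+)_tp(?P<tp>\d+)_pp(?P<pp>\d+)"
    r"_acc(?P<acc>\d+)_mbs(?P<mbs>\d+)_seq(?P<seq>\d+)_zero(?P<zero>\d)"
)

def tokens_per_step(filename: str) -> int:
    """Global tokens per optimizer step: dp * acc * mbs * seq."""
    m = NAME.search(filename)
    if m is None:
        raise ValueError(f"unrecognized config name: {filename}")
    dp, acc, mbs, seq = (int(m[k]) for k in ("dp", "acc", "mbs", "seq"))
    return dp * acc * mbs * seq

# 16 replicas * 2 accumulation steps * micro-batch 16 * 2048 tokens = 1,048,576
print(tokens_per_step(
    "config_1.14G_dp16_tp8_pp1_acc2_mbs16_seq2048_zero1_tpmodeALL_vocab32k.yaml"
))
```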
configs/config_1.14G_dp16_tp8_pp1_acc2_mbs16_seq2048_zero1_tpmodeALL_vocab32k.yaml ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp16_tp8_pp1_acc2_mbs16_seq2048_zero1_tpmodeALL_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 2048
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 16
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 8
+  tp_linear_async_communication: false
+  tp_mode: ALL_REDUCE
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 2
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 16
+  sequence_length: 2048
+  train_steps: 100
+  val_check_interval: 100
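Every config in this commit follows the same 91-line skeleton; only `general.run`, a few `model_config` shape fields, and the `parallelism`/`tokens` knobs vary. A minimal sanity-check sketch to run before launching, assuming PyYAML is installed and using the file above:

```python
import yaml

# Load one benchmark config and derive the launch geometry from it.
path = "configs/config_1.14G_dp16_tp8_pp1_acc2_mbs16_seq2048_zero1_tpmodeALL_vocab32k.yaml"
with open(path) as f:
    cfg = yaml.safe_load(f)

par, tok = cfg["parallelism"], cfg["tokens"]
world_size = par["dp"] * par["tp"] * par["pp"]   # GPUs needed: 16 * 8 * 1 = 128
gbs = par["dp"] * tok["batch_accumulation_per_replica"] * tok["micro_batch_size"]
print(f"world size: {world_size}, global batch: {gbs} samples, "
      f"{gbs * tok['sequence_length']:,} tokens/step")
```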
configs/config_1.14G_dp1_tp1_pp8_acc64_mbs1_seq2048_zero0_tpmodeRED_l16_h2048_heads32.yaml ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: bench_seqlen.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp1_tp1_pp8_acc64_mbs1_seq2048_zero0_tpmodeRED_l16_h2048_heads32
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 2048
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 0
+parallelism:
+  dp: 1
+  expert_parallel_size: 1
+  pp: 8
+  pp_engine: 1f1b
+  tp: 1
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 64
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 1
+  sequence_length: 2048
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.14G_dp2_tp1_pp4_acc2_mbs32_seq32768_zero1_tpmodeRED_vocab32k.yaml ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: bench_seqlen.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp2_tp1_pp4_acc2_mbs32_seq32768_zero1_tpmodeRED_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 32768
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 2
+  expert_parallel_size: 1
+  pp: 4
+  pp_engine: 1f1b
+  tp: 1
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 2
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 32
+  sequence_length: 32768
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.14G_dp2_tp4_pp1_acc128_mbs2_seq8192_zero1_tpmodeALL_vocab32k.yaml ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp2_tp4_pp1_acc128_mbs2_seq8192_zero1_tpmodeALL_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 8192
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 2
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 4
+  tp_linear_async_communication: false
+  tp_mode: ALL_REDUCE
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 128
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 2
+  sequence_length: 8192
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.14G_dp2_tp4_pp1_acc4_mbs16_seq32768_zero1_tpmodeALL_vocab32k.yaml ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp2_tp4_pp1_acc4_mbs16_seq32768_zero1_tpmodeALL_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 32768
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 2
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 4
+  tp_linear_async_communication: false
+  tp_mode: ALL_REDUCE
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 4
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 16
+  sequence_length: 32768
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.14G_dp2_tp4_pp1_acc4_mbs16_seq8192_zero1_tpmodeALL_vocab32k.yaml ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp2_tp4_pp1_acc4_mbs16_seq8192_zero1_tpmodeALL_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 8192
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 2
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 4
+  tp_linear_async_communication: false
+  tp_mode: ALL_REDUCE
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 4
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 16
+  sequence_length: 8192
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.14G_dp2_tp64_pp1_acc2_mbs32_seq32768_zero1_tpmodeALL_vocab32k.yaml ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp2_tp64_pp1_acc2_mbs32_seq32768_zero1_tpmodeALL_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 32768
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 2
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 64
+  tp_linear_async_communication: false
+  tp_mode: ALL_REDUCE
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 2
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 32
+  sequence_length: 32768
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.14G_dp2_tp8_pp1_acc256_mbs4_seq2048_zero1_tpmodeRED_vocab32k.yaml ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp2_tp8_pp1_acc256_mbs4_seq2048_zero1_tpmodeRED_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 2048
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 2
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 8
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 256
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 4
+  sequence_length: 2048
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.14G_dp32_tp16_pp1_acc1_mbs4_seq8192_zero1_tpmodeRED_vocab32k.yaml ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp32_tp16_pp1_acc1_mbs4_seq8192_zero1_tpmodeRED_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 8192
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 32
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 16
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 1
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 4
+  sequence_length: 8192
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.14G_dp32_tp8_pp1_acc1_mbs1_seq32768_zero1_tpmodeALL_vocab32k.yaml ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp32_tp8_pp1_acc1_mbs1_seq32768_zero1_tpmodeALL_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 32768
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 32
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 8
+  tp_linear_async_communication: false
+  tp_mode: ALL_REDUCE
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 1
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 1
+  sequence_length: 32768
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.14G_dp4_tp64_pp1_acc64_mbs2_seq8192_zero1_tpmodeRED_vocab32k.yaml ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp4_tp64_pp1_acc64_mbs2_seq8192_zero1_tpmodeRED_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 8192
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 4
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 64
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 64
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 2
+  sequence_length: 8192
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.14G_dp4_tp8_pp1_acc2_mbs4_seq32768_zero1_tpmodeRED_vocab32k.yaml ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp4_tp8_pp1_acc2_mbs4_seq32768_zero1_tpmodeRED_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 32768
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 4
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 8
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 2
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 4
+  sequence_length: 32768
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.14G_dp8_tp16_pp1_acc1_mbs16_seq32768_zero1_tpmodeRED_vocab32k.yaml ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp8_tp16_pp1_acc1_mbs16_seq32768_zero1_tpmodeRED_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 32768
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 8
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 16
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 1
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 16
+  sequence_length: 32768
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.14G_dp8_tp16_pp1_acc4_mbs4_seq8192_zero1_tpmodeALL_vocab32k.yaml ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp8_tp16_pp1_acc4_mbs4_seq8192_zero1_tpmodeALL_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 8192
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 8
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 16
+  tp_linear_async_communication: false
+  tp_mode: ALL_REDUCE
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 4
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 4
+  sequence_length: 8192
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.14G_dp8_tp32_pp1_acc4_mbs1_seq32768_zero1_tpmodeALL_vocab32k.yaml ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp8_tp32_pp1_acc4_mbs1_seq32768_zero1_tpmodeALL_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 32768
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 8
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 32
+  tp_linear_async_communication: false
+  tp_mode: ALL_REDUCE
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 4
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 1
+  sequence_length: 32768
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.14G_dp8_tp32_pp1_acc8_mbs2_seq8192_zero1_tpmodeRED_vocab32k.yaml ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp8_tp32_pp1_acc8_mbs2_seq8192_zero1_tpmodeRED_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 8192
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 8
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 32
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 8
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 2
+  sequence_length: 8192
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.34G_dp128_tp1_pp2_acc16_mbs1_seq2048_zero1_tpmodeRED_vocab131k.yaml ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.34G_dp128_tp1_pp2_acc16_mbs1_seq2048_zero1_tpmodeRED_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 2048
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 128
+  expert_parallel_size: 1
+  pp: 2
+  pp_engine: 1f1b
+  tp: 1
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 16
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 1
+  sequence_length: 2048
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.34G_dp128_tp4_pp1_acc1_mbs4_seq2048_zero1_tpmodeALL_vocab131k.yaml ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.34G_dp128_tp4_pp1_acc1_mbs4_seq2048_zero1_tpmodeALL_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 2048
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 128
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 4
+  tp_linear_async_communication: false
+  tp_mode: ALL_REDUCE
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 1
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 4
+  sequence_length: 2048
+  train_steps: 100
+  val_check_interval: 100
configs/config_1.34G_dp16_tp8_pp1_acc4_mbs4_seq4096_zero0_tpmodeRED_vocab131k.yaml ADDED
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final2.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.34G_dp16_tp8_pp1_acc4_mbs4_seq4096_zero0_tpmodeRED_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 4096
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 32
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 0
+parallelism:
+  dp: 16
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 8
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 4
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 4
+  sequence_length: 4096
+  train_steps: 100
+  val_check_interval: 100
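Unlike the 1.14G configs (`num_key_value_heads: 8`, i.e. grouped-query attention), this config uses full multi-head attention (`num_key_value_heads: 32`) with the 131k vocabulary, and the 1.34G label then matches a back-of-the-envelope count from `model_config` — a sketch assuming tied embeddings are counted once and no bias terms:

```python
# Shapes taken from the model_config above.
h, inter, layers, vocab = 2048, 8192, 16, 131072
heads = kv_heads = 32
head_dim = h // heads
attn = h * h + 2 * h * kv_heads * head_dim + h * h  # q, k, v, o projections
mlp = 3 * h * inter                                 # gate, up, down (SiLU-gated)
per_layer = attn + mlp + 2 * h                      # plus two RMSNorm weights
total = vocab * h + layers * per_layer + h          # embeddings + blocks + final norm
print(f"{total:,} params (~{total / 1e9:.2f}G)")    # 1,342,244,864 (~1.34G)
```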
configs/config_1.34G_dp1_tp8_pp2_acc16_mbs16_seq4096_zero0_tpmodeRED_vocab131k.yaml ADDED
@@ -0,0 +1,91 @@
+ checkpoints:
+   checkpoint_interval: 10000
+   checkpoints_path: checkpoints
+   checkpoints_path_is_shared_file_system: false
+   resume_checkpoint_path: null
+   save_initial_state: false
+ data_stages:
+ - data:
+     dataset: null
+     num_loading_workers: 1
+     seed: 42
+   name: Stable Training Stage
+   start_training_step: 1
+ general:
+   benchmark_csv_path: benchmark/results/bench_final2.csv
+   consumed_train_samples: null
+   ignore_sanity_checks: true
+   project: debug
+   run: 1.34G_dp1_tp8_pp2_acc16_mbs16_seq4096_zero0_tpmodeRED_vocab131k
+   seed: 42
+   step: null
+ lighteval: null
+ logging:
+   iteration_step_info_interval: 1
+   log_level: info
+   log_level_replica: info
+ model:
+   ddp_bucket_cap_mb: 25
+   dtype: bfloat16
+   init_method:
+     std: 0.02
+   make_vocab_size_divisible_by: 1
+   model_config:
+     bos_token_id: 0
+     eos_token_id: 0
+     hidden_act: silu
+     hidden_size: 2048
+     initializer_range: 0.02
+     intermediate_size: 8192
+     is_llama_config: true
+     max_position_embeddings: 4096
+     num_attention_heads: 32
+     num_hidden_layers: 16
+     num_key_value_heads: 32
+     pad_token_id: null
+     pretraining_tp: 1
+     rms_norm_eps: 1.0e-05
+     rope_scaling: null
+     tie_word_embeddings: true
+     use_cache: true
+     vocab_size: 131072
+ optimizer:
+   accumulate_grad_in_fp32: true
+   clip_grad: 1.0
+   learning_rate_scheduler:
+     learning_rate: 0.0003
+     lr_decay_starting_step: null
+     lr_decay_steps: 13
+     lr_decay_style: cosine
+     lr_warmup_steps: 2
+     lr_warmup_style: linear
+     min_decay_lr: 1.0e-05
+   optimizer_factory:
+     adam_beta1: 0.9
+     adam_beta2: 0.95
+     adam_eps: 1.0e-08
+     name: adamW
+     torch_adam_is_fused: true
+   weight_decay: 0.01
+   zero_stage: 0
+ parallelism:
+   dp: 1
+   expert_parallel_size: 1
+   pp: 2
+   pp_engine: 1f1b
+   tp: 8
+   tp_linear_async_communication: true
+   tp_mode: REDUCE_SCATTER
+ profiler: null
+ tokenizer:
+   tokenizer_max_length: null
+   tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+   tokenizer_revision: null
+ tokens:
+   batch_accumulation_per_replica: 16
+   limit_test_batches: 0
+   limit_val_batches: 0
+   micro_batch_size: 16
+   sequence_length: 4096
+   train_steps: 100
+   val_check_interval: 100
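Note: for any of these files, the GPU count the benchmark assumes is the product of the parallelism degrees. A sketch under that assumption (expert parallelism is 1 throughout this commit):

def world_size(par: dict) -> int:
    # dp x pp x tp (x expert parallelism, which is 1 in all of these configs)
    return par["dp"] * par["pp"] * par["tp"] * par.get("expert_parallel_size", 1)

# For the dp1/pp2/tp8 config above: 1 * 2 * 8 = 16 GPUs.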
configs/config_1.34G_dp1_tp8_pp2_acc8_mbs32_seq4096_zero0_tpmodeRED_vocab131k.yaml ADDED
@@ -0,0 +1,91 @@
+ checkpoints:
+   checkpoint_interval: 10000
+   checkpoints_path: checkpoints
+   checkpoints_path_is_shared_file_system: false
+   resume_checkpoint_path: null
+   save_initial_state: false
+ data_stages:
+ - data:
+     dataset: null
+     num_loading_workers: 1
+     seed: 42
+   name: Stable Training Stage
+   start_training_step: 1
+ general:
+   benchmark_csv_path: benchmark/results/bench_final2.csv
+   consumed_train_samples: null
+   ignore_sanity_checks: true
+   project: debug
+   run: 1.34G_dp1_tp8_pp2_acc8_mbs32_seq4096_zero0_tpmodeRED_vocab131k
+   seed: 42
+   step: null
+ lighteval: null
+ logging:
+   iteration_step_info_interval: 1
+   log_level: info
+   log_level_replica: info
+ model:
+   ddp_bucket_cap_mb: 25
+   dtype: bfloat16
+   init_method:
+     std: 0.02
+   make_vocab_size_divisible_by: 1
+   model_config:
+     bos_token_id: 0
+     eos_token_id: 0
+     hidden_act: silu
+     hidden_size: 2048
+     initializer_range: 0.02
+     intermediate_size: 8192
+     is_llama_config: true
+     max_position_embeddings: 4096
+     num_attention_heads: 32
+     num_hidden_layers: 16
+     num_key_value_heads: 32
+     pad_token_id: null
+     pretraining_tp: 1
+     rms_norm_eps: 1.0e-05
+     rope_scaling: null
+     tie_word_embeddings: true
+     use_cache: true
+     vocab_size: 131072
+ optimizer:
+   accumulate_grad_in_fp32: true
+   clip_grad: 1.0
+   learning_rate_scheduler:
+     learning_rate: 0.0003
+     lr_decay_starting_step: null
+     lr_decay_steps: 13
+     lr_decay_style: cosine
+     lr_warmup_steps: 2
+     lr_warmup_style: linear
+     min_decay_lr: 1.0e-05
+   optimizer_factory:
+     adam_beta1: 0.9
+     adam_beta2: 0.95
+     adam_eps: 1.0e-08
+     name: adamW
+     torch_adam_is_fused: true
+   weight_decay: 0.01
+   zero_stage: 0
+ parallelism:
+   dp: 1
+   expert_parallel_size: 1
+   pp: 2
+   pp_engine: 1f1b
+   tp: 8
+   tp_linear_async_communication: true
+   tp_mode: REDUCE_SCATTER
+ profiler: null
+ tokenizer:
+   tokenizer_max_length: null
+   tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+   tokenizer_revision: null
+ tokens:
+   batch_accumulation_per_replica: 8
+   limit_test_batches: 0
+   limit_val_batches: 0
+   micro_batch_size: 32
+   sequence_length: 4096
+   train_steps: 100
+   val_check_interval: 100
configs/config_1.34G_dp2_tp16_pp1_acc2_mbs128_seq2048_zero1_tpmodeRED_vocab131k.yaml ADDED
@@ -0,0 +1,91 @@
+ checkpoints:
+   checkpoint_interval: 10000
+   checkpoints_path: checkpoints
+   checkpoints_path_is_shared_file_system: false
+   resume_checkpoint_path: null
+   save_initial_state: false
+ data_stages:
+ - data:
+     dataset: null
+     num_loading_workers: 1
+     seed: 42
+   name: Stable Training Stage
+   start_training_step: 1
+ general:
+   benchmark_csv_path: benchmark/results/bench_final.csv
+   consumed_train_samples: null
+   ignore_sanity_checks: true
+   project: debug
+   run: 1.34G_dp2_tp16_pp1_acc2_mbs128_seq2048_zero1_tpmodeRED_vocab131k
+   seed: 42
+   step: null
+ lighteval: null
+ logging:
+   iteration_step_info_interval: 1
+   log_level: info
+   log_level_replica: info
+ model:
+   ddp_bucket_cap_mb: 25
+   dtype: bfloat16
+   init_method:
+     std: 0.02
+   make_vocab_size_divisible_by: 1
+   model_config:
+     bos_token_id: 0
+     eos_token_id: 0
+     hidden_act: silu
+     hidden_size: 2048
+     initializer_range: 0.02
+     intermediate_size: 8192
+     is_llama_config: true
+     max_position_embeddings: 2048
+     num_attention_heads: 32
+     num_hidden_layers: 16
+     num_key_value_heads: 8
+     pad_token_id: null
+     pretraining_tp: 1
+     rms_norm_eps: 1.0e-05
+     rope_scaling: null
+     tie_word_embeddings: true
+     use_cache: true
+     vocab_size: 131072
+ optimizer:
+   accumulate_grad_in_fp32: true
+   clip_grad: 1.0
+   learning_rate_scheduler:
+     learning_rate: 0.0003
+     lr_decay_starting_step: null
+     lr_decay_steps: 13
+     lr_decay_style: cosine
+     lr_warmup_steps: 2
+     lr_warmup_style: linear
+     min_decay_lr: 1.0e-05
+   optimizer_factory:
+     adam_beta1: 0.9
+     adam_beta2: 0.95
+     adam_eps: 1.0e-08
+     name: adamW
+     torch_adam_is_fused: true
+   weight_decay: 0.01
+   zero_stage: 1
+ parallelism:
+   dp: 2
+   expert_parallel_size: 1
+   pp: 1
+   pp_engine: 1f1b
+   tp: 16
+   tp_linear_async_communication: true
+   tp_mode: REDUCE_SCATTER
+ profiler: null
+ tokenizer:
+   tokenizer_max_length: null
+   tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+   tokenizer_revision: null
+ tokens:
+   batch_accumulation_per_replica: 2
+   limit_test_batches: 0
+   limit_val_batches: 0
+   micro_batch_size: 128
+   sequence_length: 2048
+   train_steps: 100
+   val_check_interval: 100
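Note: this config switches to grouped-query attention (num_key_value_heads: 8 against 32 query heads), which shrinks the KV cache. A rough sketch of the footprint, assuming bf16 (2 bytes) and the usual per-layer K/V layout; the helper is illustrative, not nanotron code:

def kv_cache_bytes(seq_len: int, batch: int, m: dict, dtype_bytes: int = 2) -> int:
    head_dim = m["hidden_size"] // m["num_attention_heads"]               # 2048 // 32 = 64
    per_token_layer = 2 * m["num_key_value_heads"] * head_dim * dtype_bytes  # K and V
    return per_token_layer * m["num_hidden_layers"] * seq_len * batch

# With 8 KV heads instead of 32, the cache is 4x smaller per token.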
configs/config_1.34G_dp2_tp16_pp4_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml ADDED
@@ -0,0 +1,91 @@
+ checkpoints:
+   checkpoint_interval: 10000
+   checkpoints_path: checkpoints
+   checkpoints_path_is_shared_file_system: false
+   resume_checkpoint_path: null
+   save_initial_state: false
+ data_stages:
+ - data:
+     dataset: null
+     num_loading_workers: 1
+     seed: 42
+   name: Stable Training Stage
+   start_training_step: 1
+ general:
+   benchmark_csv_path: benchmark/results/bench_final2.csv
+   consumed_train_samples: null
+   ignore_sanity_checks: true
+   project: debug
+   run: 1.34G_dp2_tp16_pp4_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k
+   seed: 42
+   step: null
+ lighteval: null
+ logging:
+   iteration_step_info_interval: 1
+   log_level: info
+   log_level_replica: info
+ model:
+   ddp_bucket_cap_mb: 25
+   dtype: bfloat16
+   init_method:
+     std: 0.02
+   make_vocab_size_divisible_by: 1
+   model_config:
+     bos_token_id: 0
+     eos_token_id: 0
+     hidden_act: silu
+     hidden_size: 2048
+     initializer_range: 0.02
+     intermediate_size: 8192
+     is_llama_config: true
+     max_position_embeddings: 4096
+     num_attention_heads: 32
+     num_hidden_layers: 16
+     num_key_value_heads: 32
+     pad_token_id: null
+     pretraining_tp: 1
+     rms_norm_eps: 1.0e-05
+     rope_scaling: null
+     tie_word_embeddings: true
+     use_cache: true
+     vocab_size: 131072
+ optimizer:
+   accumulate_grad_in_fp32: true
+   clip_grad: 1.0
+   learning_rate_scheduler:
+     learning_rate: 0.0003
+     lr_decay_starting_step: null
+     lr_decay_steps: 13
+     lr_decay_style: cosine
+     lr_warmup_steps: 2
+     lr_warmup_style: linear
+     min_decay_lr: 1.0e-05
+   optimizer_factory:
+     adam_beta1: 0.9
+     adam_beta2: 0.95
+     adam_eps: 1.0e-08
+     name: adamW
+     torch_adam_is_fused: true
+   weight_decay: 0.01
+   zero_stage: 0
+ parallelism:
+   dp: 2
+   expert_parallel_size: 1
+   pp: 4
+   pp_engine: 1f1b
+   tp: 16
+   tp_linear_async_communication: true
+   tp_mode: REDUCE_SCATTER
+ profiler: null
+ tokenizer:
+   tokenizer_max_length: null
+   tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+   tokenizer_revision: null
+ tokens:
+   batch_accumulation_per_replica: 128
+   limit_test_batches: 0
+   limit_val_batches: 0
+   micro_batch_size: 1
+   sequence_length: 4096
+   train_steps: 100
+   val_check_interval: 100
configs/config_1.34G_dp2_tp1_pp4_acc4_mbs32_seq4096_zero1_tpmodeRED_vocab131k.yaml ADDED
@@ -0,0 +1,91 @@
+ checkpoints:
+   checkpoint_interval: 10000
+   checkpoints_path: checkpoints
+   checkpoints_path_is_shared_file_system: false
+   resume_checkpoint_path: null
+   save_initial_state: false
+ data_stages:
+ - data:
+     dataset: null
+     num_loading_workers: 1
+     seed: 42
+   name: Stable Training Stage
+   start_training_step: 1
+ general:
+   benchmark_csv_path: benchmark/results/bench_final2.csv
+   consumed_train_samples: null
+   ignore_sanity_checks: true
+   project: debug
+   run: 1.34G_dp2_tp1_pp4_acc4_mbs32_seq4096_zero1_tpmodeRED_vocab131k
+   seed: 42
+   step: null
+ lighteval: null
+ logging:
+   iteration_step_info_interval: 1
+   log_level: info
+   log_level_replica: info
+ model:
+   ddp_bucket_cap_mb: 25
+   dtype: bfloat16
+   init_method:
+     std: 0.02
+   make_vocab_size_divisible_by: 1
+   model_config:
+     bos_token_id: 0
+     eos_token_id: 0
+     hidden_act: silu
+     hidden_size: 2048
+     initializer_range: 0.02
+     intermediate_size: 8192
+     is_llama_config: true
+     max_position_embeddings: 4096
+     num_attention_heads: 32
+     num_hidden_layers: 16
+     num_key_value_heads: 32
+     pad_token_id: null
+     pretraining_tp: 1
+     rms_norm_eps: 1.0e-05
+     rope_scaling: null
+     tie_word_embeddings: true
+     use_cache: true
+     vocab_size: 131072
+ optimizer:
+   accumulate_grad_in_fp32: true
+   clip_grad: 1.0
+   learning_rate_scheduler:
+     learning_rate: 0.0003
+     lr_decay_starting_step: null
+     lr_decay_steps: 13
+     lr_decay_style: cosine
+     lr_warmup_steps: 2
+     lr_warmup_style: linear
+     min_decay_lr: 1.0e-05
+   optimizer_factory:
+     adam_beta1: 0.9
+     adam_beta2: 0.95
+     adam_eps: 1.0e-08
+     name: adamW
+     torch_adam_is_fused: true
+   weight_decay: 0.01
+   zero_stage: 1
+ parallelism:
+   dp: 2
+   expert_parallel_size: 1
+   pp: 4
+   pp_engine: 1f1b
+   tp: 1
+   tp_linear_async_communication: true
+   tp_mode: REDUCE_SCATTER
+ profiler: null
+ tokenizer:
+   tokenizer_max_length: null
+   tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+   tokenizer_revision: null
+ tokens:
+   batch_accumulation_per_replica: 4
+   limit_test_batches: 0
+   limit_val_batches: 0
+   micro_batch_size: 32
+   sequence_length: 4096
+   train_steps: 100
+   val_check_interval: 100
configs/config_1.34G_dp2_tp256_pp1_acc32_mbs2_seq8192_zero1_tpmodeALL_vocab131k.yaml ADDED
@@ -0,0 +1,91 @@
+ checkpoints:
+   checkpoint_interval: 10000
+   checkpoints_path: checkpoints
+   checkpoints_path_is_shared_file_system: false
+   resume_checkpoint_path: null
+   save_initial_state: false
+ data_stages:
+ - data:
+     dataset: null
+     num_loading_workers: 1
+     seed: 42
+   name: Stable Training Stage
+   start_training_step: 1
+ general:
+   benchmark_csv_path: benchmark/results/bench_final.csv
+   consumed_train_samples: null
+   ignore_sanity_checks: true
+   project: debug
+   run: 1.34G_dp2_tp256_pp1_acc32_mbs2_seq8192_zero1_tpmodeALL_vocab131k
+   seed: 42
+   step: null
+ lighteval: null
+ logging:
+   iteration_step_info_interval: 1
+   log_level: info
+   log_level_replica: info
+ model:
+   ddp_bucket_cap_mb: 25
+   dtype: bfloat16
+   init_method:
+     std: 0.02
+   make_vocab_size_divisible_by: 1
+   model_config:
+     bos_token_id: 0
+     eos_token_id: 0
+     hidden_act: silu
+     hidden_size: 2048
+     initializer_range: 0.02
+     intermediate_size: 8192
+     is_llama_config: true
+     max_position_embeddings: 8192
+     num_attention_heads: 32
+     num_hidden_layers: 16
+     num_key_value_heads: 8
+     pad_token_id: null
+     pretraining_tp: 1
+     rms_norm_eps: 1.0e-05
+     rope_scaling: null
+     tie_word_embeddings: true
+     use_cache: true
+     vocab_size: 131072
+ optimizer:
+   accumulate_grad_in_fp32: true
+   clip_grad: 1.0
+   learning_rate_scheduler:
+     learning_rate: 0.0003
+     lr_decay_starting_step: null
+     lr_decay_steps: 13
+     lr_decay_style: cosine
+     lr_warmup_steps: 2
+     lr_warmup_style: linear
+     min_decay_lr: 1.0e-05
+   optimizer_factory:
+     adam_beta1: 0.9
+     adam_beta2: 0.95
+     adam_eps: 1.0e-08
+     name: adamW
+     torch_adam_is_fused: true
+   weight_decay: 0.01
+   zero_stage: 1
+ parallelism:
+   dp: 2
+   expert_parallel_size: 1
+   pp: 1
+   pp_engine: 1f1b
+   tp: 256
+   tp_linear_async_communication: false
+   tp_mode: ALL_REDUCE
+ profiler: null
+ tokenizer:
+   tokenizer_max_length: null
+   tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+   tokenizer_revision: null
+ tokens:
+   batch_accumulation_per_replica: 32
+   limit_test_batches: 0
+   limit_val_batches: 0
+   micro_batch_size: 2
+   sequence_length: 8192
+   train_steps: 100
+   val_check_interval: 100
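Note: across these files the two tensor-parallel flags move together: tp_mode ALL_REDUCE appears with tp_linear_async_communication: false, and REDUCE_SCATTER with true. A sweep generator could assert that pairing; this is an observation about the configs in this commit, not a rule quoted from nanotron:

def check_tp_flags(par: dict) -> None:
    # Pairing observed in every config in this commit.
    expected = {"ALL_REDUCE": False, "REDUCE_SCATTER": True}
    assert par["tp_linear_async_communication"] == expected[par["tp_mode"]]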
configs/config_1.34G_dp32_tp4_pp1_acc1_mbs8_seq4096_zero0_tpmodeRED_vocab131k.yaml ADDED
@@ -0,0 +1,91 @@
+ checkpoints:
+   checkpoint_interval: 10000
+   checkpoints_path: checkpoints
+   checkpoints_path_is_shared_file_system: false
+   resume_checkpoint_path: null
+   save_initial_state: false
+ data_stages:
+ - data:
+     dataset: null
+     num_loading_workers: 1
+     seed: 42
+   name: Stable Training Stage
+   start_training_step: 1
+ general:
+   benchmark_csv_path: benchmark/results/bench_final2.csv
+   consumed_train_samples: null
+   ignore_sanity_checks: true
+   project: debug
+   run: 1.34G_dp32_tp4_pp1_acc1_mbs8_seq4096_zero0_tpmodeRED_vocab131k
+   seed: 42
+   step: null
+ lighteval: null
+ logging:
+   iteration_step_info_interval: 1
+   log_level: info
+   log_level_replica: info
+ model:
+   ddp_bucket_cap_mb: 25
+   dtype: bfloat16
+   init_method:
+     std: 0.02
+   make_vocab_size_divisible_by: 1
+   model_config:
+     bos_token_id: 0
+     eos_token_id: 0
+     hidden_act: silu
+     hidden_size: 2048
+     initializer_range: 0.02
+     intermediate_size: 8192
+     is_llama_config: true
+     max_position_embeddings: 4096
+     num_attention_heads: 32
+     num_hidden_layers: 16
+     num_key_value_heads: 32
+     pad_token_id: null
+     pretraining_tp: 1
+     rms_norm_eps: 1.0e-05
+     rope_scaling: null
+     tie_word_embeddings: true
+     use_cache: true
+     vocab_size: 131072
+ optimizer:
+   accumulate_grad_in_fp32: true
+   clip_grad: 1.0
+   learning_rate_scheduler:
+     learning_rate: 0.0003
+     lr_decay_starting_step: null
+     lr_decay_steps: 13
+     lr_decay_style: cosine
+     lr_warmup_steps: 2
+     lr_warmup_style: linear
+     min_decay_lr: 1.0e-05
+   optimizer_factory:
+     adam_beta1: 0.9
+     adam_beta2: 0.95
+     adam_eps: 1.0e-08
+     name: adamW
+     torch_adam_is_fused: true
+   weight_decay: 0.01
+   zero_stage: 0
+ parallelism:
+   dp: 32
+   expert_parallel_size: 1
+   pp: 1
+   pp_engine: 1f1b
+   tp: 4
+   tp_linear_async_communication: true
+   tp_mode: REDUCE_SCATTER
+ profiler: null
+ tokenizer:
+   tokenizer_max_length: null
+   tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+   tokenizer_revision: null
+ tokens:
+   batch_accumulation_per_replica: 1
+   limit_test_batches: 0
+   limit_val_batches: 0
+   micro_batch_size: 8
+   sequence_length: 4096
+   train_steps: 100
+   val_check_interval: 100
configs/config_1.34G_dp32_tp4_pp1_acc32_mbs2_seq2048_zero1_tpmodeRED_vocab131k.yaml ADDED
@@ -0,0 +1,91 @@
+ checkpoints:
+   checkpoint_interval: 10000
+   checkpoints_path: checkpoints
+   checkpoints_path_is_shared_file_system: false
+   resume_checkpoint_path: null
+   save_initial_state: false
+ data_stages:
+ - data:
+     dataset: null
+     num_loading_workers: 1
+     seed: 42
+   name: Stable Training Stage
+   start_training_step: 1
+ general:
+   benchmark_csv_path: benchmark/results/bench_final.csv
+   consumed_train_samples: null
+   ignore_sanity_checks: true
+   project: debug
+   run: 1.34G_dp32_tp4_pp1_acc32_mbs2_seq2048_zero1_tpmodeRED_vocab131k
+   seed: 42
+   step: null
+ lighteval: null
+ logging:
+   iteration_step_info_interval: 1
+   log_level: info
+   log_level_replica: info
+ model:
+   ddp_bucket_cap_mb: 25
+   dtype: bfloat16
+   init_method:
+     std: 0.02
+   make_vocab_size_divisible_by: 1
+   model_config:
+     bos_token_id: 0
+     eos_token_id: 0
+     hidden_act: silu
+     hidden_size: 2048
+     initializer_range: 0.02
+     intermediate_size: 8192
+     is_llama_config: true
+     max_position_embeddings: 2048
+     num_attention_heads: 32
+     num_hidden_layers: 16
+     num_key_value_heads: 8
+     pad_token_id: null
+     pretraining_tp: 1
+     rms_norm_eps: 1.0e-05
+     rope_scaling: null
+     tie_word_embeddings: true
+     use_cache: true
+     vocab_size: 131072
+ optimizer:
+   accumulate_grad_in_fp32: true
+   clip_grad: 1.0
+   learning_rate_scheduler:
+     learning_rate: 0.0003
+     lr_decay_starting_step: null
+     lr_decay_steps: 13
+     lr_decay_style: cosine
+     lr_warmup_steps: 2
+     lr_warmup_style: linear
+     min_decay_lr: 1.0e-05
+   optimizer_factory:
+     adam_beta1: 0.9
+     adam_beta2: 0.95
+     adam_eps: 1.0e-08
+     name: adamW
+     torch_adam_is_fused: true
+   weight_decay: 0.01
+   zero_stage: 1
+ parallelism:
+   dp: 32
+   expert_parallel_size: 1
+   pp: 1
+   pp_engine: 1f1b
+   tp: 4
+   tp_linear_async_communication: true
+   tp_mode: REDUCE_SCATTER
+ profiler: null
+ tokenizer:
+   tokenizer_max_length: null
+   tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+   tokenizer_revision: null
+ tokens:
+   batch_accumulation_per_replica: 32
+   limit_test_batches: 0
+   limit_val_batches: 0
+   micro_batch_size: 2
+   sequence_length: 2048
+   train_steps: 100
+   val_check_interval: 100
configs/config_1.34G_dp4_tp16_pp1_acc2_mbs16_seq32768_zero1_tpmodeALL_vocab131k.yaml ADDED
@@ -0,0 +1,91 @@
+ checkpoints:
+   checkpoint_interval: 10000
+   checkpoints_path: checkpoints
+   checkpoints_path_is_shared_file_system: false
+   resume_checkpoint_path: null
+   save_initial_state: false
+ data_stages:
+ - data:
+     dataset: null
+     num_loading_workers: 1
+     seed: 42
+   name: Stable Training Stage
+   start_training_step: 1
+ general:
+   benchmark_csv_path: benchmark/results/bench_final.csv
+   consumed_train_samples: null
+   ignore_sanity_checks: true
+   project: debug
+   run: 1.34G_dp4_tp16_pp1_acc2_mbs16_seq32768_zero1_tpmodeALL_vocab131k
+   seed: 42
+   step: null
+ lighteval: null
+ logging:
+   iteration_step_info_interval: 1
+   log_level: info
+   log_level_replica: info
+ model:
+   ddp_bucket_cap_mb: 25
+   dtype: bfloat16
+   init_method:
+     std: 0.02
+   make_vocab_size_divisible_by: 1
+   model_config:
+     bos_token_id: 0
+     eos_token_id: 0
+     hidden_act: silu
+     hidden_size: 2048
+     initializer_range: 0.02
+     intermediate_size: 8192
+     is_llama_config: true
+     max_position_embeddings: 32768
+     num_attention_heads: 32
+     num_hidden_layers: 16
+     num_key_value_heads: 8
+     pad_token_id: null
+     pretraining_tp: 1
+     rms_norm_eps: 1.0e-05
+     rope_scaling: null
+     tie_word_embeddings: true
+     use_cache: true
+     vocab_size: 131072
+ optimizer:
+   accumulate_grad_in_fp32: true
+   clip_grad: 1.0
+   learning_rate_scheduler:
+     learning_rate: 0.0003
+     lr_decay_starting_step: null
+     lr_decay_steps: 13
+     lr_decay_style: cosine
+     lr_warmup_steps: 2
+     lr_warmup_style: linear
+     min_decay_lr: 1.0e-05
+   optimizer_factory:
+     adam_beta1: 0.9
+     adam_beta2: 0.95
+     adam_eps: 1.0e-08
+     name: adamW
+     torch_adam_is_fused: true
+   weight_decay: 0.01
+   zero_stage: 1
+ parallelism:
+   dp: 4
+   expert_parallel_size: 1
+   pp: 1
+   pp_engine: 1f1b
+   tp: 16
+   tp_linear_async_communication: false
+   tp_mode: ALL_REDUCE
+ profiler: null
+ tokenizer:
+   tokenizer_max_length: null
+   tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+   tokenizer_revision: null
+ tokens:
+   batch_accumulation_per_replica: 2
+   limit_test_batches: 0
+   limit_val_batches: 0
+   micro_batch_size: 16
+   sequence_length: 32768
+   train_steps: 100
+   val_check_interval: 100
configs/config_1.34G_dp4_tp16_pp1_acc64_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml ADDED
@@ -0,0 +1,91 @@
+ checkpoints:
+   checkpoint_interval: 10000
+   checkpoints_path: checkpoints
+   checkpoints_path_is_shared_file_system: false
+   resume_checkpoint_path: null
+   save_initial_state: false
+ data_stages:
+ - data:
+     dataset: null
+     num_loading_workers: 1
+     seed: 42
+   name: Stable Training Stage
+   start_training_step: 1
+ general:
+   benchmark_csv_path: benchmark/results/bench_final2.csv
+   consumed_train_samples: null
+   ignore_sanity_checks: true
+   project: debug
+   run: 1.34G_dp4_tp16_pp1_acc64_mbs1_seq4096_zero0_tpmodeRED_vocab131k
+   seed: 42
+   step: null
+ lighteval: null
+ logging:
+   iteration_step_info_interval: 1
+   log_level: info
+   log_level_replica: info
+ model:
+   ddp_bucket_cap_mb: 25
+   dtype: bfloat16
+   init_method:
+     std: 0.02
+   make_vocab_size_divisible_by: 1
+   model_config:
+     bos_token_id: 0
+     eos_token_id: 0
+     hidden_act: silu
+     hidden_size: 2048
+     initializer_range: 0.02
+     intermediate_size: 8192
+     is_llama_config: true
+     max_position_embeddings: 4096
+     num_attention_heads: 32
+     num_hidden_layers: 16
+     num_key_value_heads: 32
+     pad_token_id: null
+     pretraining_tp: 1
+     rms_norm_eps: 1.0e-05
+     rope_scaling: null
+     tie_word_embeddings: true
+     use_cache: true
+     vocab_size: 131072
+ optimizer:
+   accumulate_grad_in_fp32: true
+   clip_grad: 1.0
+   learning_rate_scheduler:
+     learning_rate: 0.0003
+     lr_decay_starting_step: null
+     lr_decay_steps: 13
+     lr_decay_style: cosine
+     lr_warmup_steps: 2
+     lr_warmup_style: linear
+     min_decay_lr: 1.0e-05
+   optimizer_factory:
+     adam_beta1: 0.9
+     adam_beta2: 0.95
+     adam_eps: 1.0e-08
+     name: adamW
+     torch_adam_is_fused: true
+   weight_decay: 0.01
+   zero_stage: 0
+ parallelism:
+   dp: 4
+   expert_parallel_size: 1
+   pp: 1
+   pp_engine: 1f1b
+   tp: 16
+   tp_linear_async_communication: true
+   tp_mode: REDUCE_SCATTER
+ profiler: null
+ tokenizer:
+   tokenizer_max_length: null
+   tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+   tokenizer_revision: null
+ tokens:
+   batch_accumulation_per_replica: 64
+   limit_test_batches: 0
+   limit_val_batches: 0
+   micro_batch_size: 1
+   sequence_length: 4096
+   train_steps: 100
+   val_check_interval: 100
configs/config_1.34G_dp4_tp2_pp2_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml ADDED
@@ -0,0 +1,91 @@
+ checkpoints:
+   checkpoint_interval: 10000
+   checkpoints_path: checkpoints
+   checkpoints_path_is_shared_file_system: false
+   resume_checkpoint_path: null
+   save_initial_state: false
+ data_stages:
+ - data:
+     dataset: null
+     num_loading_workers: 1
+     seed: 42
+   name: Stable Training Stage
+   start_training_step: 1
+ general:
+   benchmark_csv_path: benchmark/results/bench_final2.csv
+   consumed_train_samples: null
+   ignore_sanity_checks: true
+   project: debug
+   run: 1.34G_dp4_tp2_pp2_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k
+   seed: 42
+   step: null
+ lighteval: null
+ logging:
+   iteration_step_info_interval: 1
+   log_level: info
+   log_level_replica: info
+ model:
+   ddp_bucket_cap_mb: 25
+   dtype: bfloat16
+   init_method:
+     std: 0.02
+   make_vocab_size_divisible_by: 1
+   model_config:
+     bos_token_id: 0
+     eos_token_id: 0
+     hidden_act: silu
+     hidden_size: 2048
+     initializer_range: 0.02
+     intermediate_size: 8192
+     is_llama_config: true
+     max_position_embeddings: 4096
+     num_attention_heads: 32
+     num_hidden_layers: 16
+     num_key_value_heads: 32
+     pad_token_id: null
+     pretraining_tp: 1
+     rms_norm_eps: 1.0e-05
+     rope_scaling: null
+     tie_word_embeddings: true
+     use_cache: true
+     vocab_size: 131072
+ optimizer:
+   accumulate_grad_in_fp32: true
+   clip_grad: 1.0
+   learning_rate_scheduler:
+     learning_rate: 0.0003
+     lr_decay_starting_step: null
+     lr_decay_steps: 13
+     lr_decay_style: cosine
+     lr_warmup_steps: 2
+     lr_warmup_style: linear
+     min_decay_lr: 1.0e-05
+   optimizer_factory:
+     adam_beta1: 0.9
+     adam_beta2: 0.95
+     adam_eps: 1.0e-08
+     name: adamW
+     torch_adam_is_fused: true
+   weight_decay: 0.01
+   zero_stage: 1
+ parallelism:
+   dp: 4
+   expert_parallel_size: 1
+   pp: 2
+   pp_engine: 1f1b
+   tp: 2
+   tp_linear_async_communication: true
+   tp_mode: REDUCE_SCATTER
+ profiler: null
+ tokenizer:
+   tokenizer_max_length: null
+   tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+   tokenizer_revision: null
+ tokens:
+   batch_accumulation_per_replica: 32
+   limit_test_batches: 0
+   limit_val_batches: 0
+   micro_batch_size: 2
+   sequence_length: 4096
+   train_steps: 100
+   val_check_interval: 100
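Note: every file uses the same learning-rate schedule: linear warmup for lr_warmup_steps, then cosine decay over lr_decay_steps down to min_decay_lr. A plain-Python rendering of that description (a sketch, not nanotron's implementation):

import math

def lr_at(step: int, s: dict) -> float:
    peak, warm = s["learning_rate"], s["lr_warmup_steps"]
    decay, floor = s["lr_decay_steps"], s["min_decay_lr"]
    if step < warm:                        # linear warmup up to the peak LR
        return peak * (step + 1) / warm
    t = min(step - warm, decay) / decay    # progress through the cosine decay
    return floor + 0.5 * (peak - floor) * (1.0 + math.cos(math.pi * t))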
configs/config_1.34G_dp4_tp32_pp1_acc1_mbs8_seq32768_zero1_tpmodeRED_vocab131k.yaml ADDED
@@ -0,0 +1,91 @@
+ checkpoints:
+   checkpoint_interval: 10000
+   checkpoints_path: checkpoints
+   checkpoints_path_is_shared_file_system: false
+   resume_checkpoint_path: null
+   save_initial_state: false
+ data_stages:
+ - data:
+     dataset: null
+     num_loading_workers: 1
+     seed: 42
+   name: Stable Training Stage
+   start_training_step: 1
+ general:
+   benchmark_csv_path: benchmark/results/bench_final.csv
+   consumed_train_samples: null
+   ignore_sanity_checks: true
+   project: debug
+   run: 1.34G_dp4_tp32_pp1_acc1_mbs8_seq32768_zero1_tpmodeRED_vocab131k
+   seed: 42
+   step: null
+ lighteval: null
+ logging:
+   iteration_step_info_interval: 1
+   log_level: info
+   log_level_replica: info
+ model:
+   ddp_bucket_cap_mb: 25
+   dtype: bfloat16
+   init_method:
+     std: 0.02
+   make_vocab_size_divisible_by: 1
+   model_config:
+     bos_token_id: 0
+     eos_token_id: 0
+     hidden_act: silu
+     hidden_size: 2048
+     initializer_range: 0.02
+     intermediate_size: 8192
+     is_llama_config: true
+     max_position_embeddings: 32768
+     num_attention_heads: 32
+     num_hidden_layers: 16
+     num_key_value_heads: 8
+     pad_token_id: null
+     pretraining_tp: 1
+     rms_norm_eps: 1.0e-05
+     rope_scaling: null
+     tie_word_embeddings: true
+     use_cache: true
+     vocab_size: 131072
+ optimizer:
+   accumulate_grad_in_fp32: true
+   clip_grad: 1.0
+   learning_rate_scheduler:
+     learning_rate: 0.0003
+     lr_decay_starting_step: null
+     lr_decay_steps: 13
+     lr_decay_style: cosine
+     lr_warmup_steps: 2
+     lr_warmup_style: linear
+     min_decay_lr: 1.0e-05
+   optimizer_factory:
+     adam_beta1: 0.9
+     adam_beta2: 0.95
+     adam_eps: 1.0e-08
+     name: adamW
+     torch_adam_is_fused: true
+   weight_decay: 0.01
+   zero_stage: 1
+ parallelism:
+   dp: 4
+   expert_parallel_size: 1
+   pp: 1
+   pp_engine: 1f1b
+   tp: 32
+   tp_linear_async_communication: true
+   tp_mode: REDUCE_SCATTER
+ profiler: null
+ tokenizer:
+   tokenizer_max_length: null
+   tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+   tokenizer_revision: null
+ tokens:
+   batch_accumulation_per_replica: 1
+   limit_test_batches: 0
+   limit_val_batches: 0
+   micro_batch_size: 8
+   sequence_length: 32768
+   train_steps: 100
+   val_check_interval: 100
configs/config_1.34G_dp4_tp32_pp1_acc32_mbs4_seq2048_zero1_tpmodeRED_vocab131k.yaml ADDED
@@ -0,0 +1,91 @@
+ checkpoints:
+   checkpoint_interval: 10000
+   checkpoints_path: checkpoints
+   checkpoints_path_is_shared_file_system: false
+   resume_checkpoint_path: null
+   save_initial_state: false
+ data_stages:
+ - data:
+     dataset: null
+     num_loading_workers: 1
+     seed: 42
+   name: Stable Training Stage
+   start_training_step: 1
+ general:
+   benchmark_csv_path: benchmark/results/bench_final.csv
+   consumed_train_samples: null
+   ignore_sanity_checks: true
+   project: debug
+   run: 1.34G_dp4_tp32_pp1_acc32_mbs4_seq2048_zero1_tpmodeRED_vocab131k
+   seed: 42
+   step: null
+ lighteval: null
+ logging:
+   iteration_step_info_interval: 1
+   log_level: info
+   log_level_replica: info
+ model:
+   ddp_bucket_cap_mb: 25
+   dtype: bfloat16
+   init_method:
+     std: 0.02
+   make_vocab_size_divisible_by: 1
+   model_config:
+     bos_token_id: 0
+     eos_token_id: 0
+     hidden_act: silu
+     hidden_size: 2048
+     initializer_range: 0.02
+     intermediate_size: 8192
+     is_llama_config: true
+     max_position_embeddings: 2048
+     num_attention_heads: 32
+     num_hidden_layers: 16
+     num_key_value_heads: 8
+     pad_token_id: null
+     pretraining_tp: 1
+     rms_norm_eps: 1.0e-05
+     rope_scaling: null
+     tie_word_embeddings: true
+     use_cache: true
+     vocab_size: 131072
+ optimizer:
+   accumulate_grad_in_fp32: true
+   clip_grad: 1.0
+   learning_rate_scheduler:
+     learning_rate: 0.0003
+     lr_decay_starting_step: null
+     lr_decay_steps: 13
+     lr_decay_style: cosine
+     lr_warmup_steps: 2
+     lr_warmup_style: linear
+     min_decay_lr: 1.0e-05
+   optimizer_factory:
+     adam_beta1: 0.9
+     adam_beta2: 0.95
+     adam_eps: 1.0e-08
+     name: adamW
+     torch_adam_is_fused: true
+   weight_decay: 0.01
+   zero_stage: 1
+ parallelism:
+   dp: 4
+   expert_parallel_size: 1
+   pp: 1
+   pp_engine: 1f1b
+   tp: 32
+   tp_linear_async_communication: true
+   tp_mode: REDUCE_SCATTER
+ profiler: null
+ tokenizer:
+   tokenizer_max_length: null
+   tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+   tokenizer_revision: null
+ tokens:
+   batch_accumulation_per_replica: 32
+   limit_test_batches: 0
+   limit_val_batches: 0
+   micro_batch_size: 4
+   sequence_length: 2048
+   train_steps: 100
+   val_check_interval: 100
configs/config_1.34G_dp4_tp4_pp1_acc32_mbs16_seq2048_zero1_tpmodeALL_vocab131k.yaml ADDED
@@ -0,0 +1,91 @@
+ checkpoints:
+   checkpoint_interval: 10000
+   checkpoints_path: checkpoints
+   checkpoints_path_is_shared_file_system: false
+   resume_checkpoint_path: null
+   save_initial_state: false
+ data_stages:
+ - data:
+     dataset: null
+     num_loading_workers: 1
+     seed: 42
+   name: Stable Training Stage
+   start_training_step: 1
+ general:
+   benchmark_csv_path: benchmark/results/bench_final.csv
+   consumed_train_samples: null
+   ignore_sanity_checks: true
+   project: debug
+   run: 1.34G_dp4_tp4_pp1_acc32_mbs16_seq2048_zero1_tpmodeALL_vocab131k
+   seed: 42
+   step: null
+ lighteval: null
+ logging:
+   iteration_step_info_interval: 1
+   log_level: info
+   log_level_replica: info
+ model:
+   ddp_bucket_cap_mb: 25
+   dtype: bfloat16
+   init_method:
+     std: 0.02
+   make_vocab_size_divisible_by: 1
+   model_config:
+     bos_token_id: 0
+     eos_token_id: 0
+     hidden_act: silu
+     hidden_size: 2048
+     initializer_range: 0.02
+     intermediate_size: 8192
+     is_llama_config: true
+     max_position_embeddings: 2048
+     num_attention_heads: 32
+     num_hidden_layers: 16
+     num_key_value_heads: 8
+     pad_token_id: null
+     pretraining_tp: 1
+     rms_norm_eps: 1.0e-05
+     rope_scaling: null
+     tie_word_embeddings: true
+     use_cache: true
+     vocab_size: 131072
+ optimizer:
+   accumulate_grad_in_fp32: true
+   clip_grad: 1.0
+   learning_rate_scheduler:
+     learning_rate: 0.0003
+     lr_decay_starting_step: null
+     lr_decay_steps: 13
+     lr_decay_style: cosine
+     lr_warmup_steps: 2
+     lr_warmup_style: linear
+     min_decay_lr: 1.0e-05
+   optimizer_factory:
+     adam_beta1: 0.9
+     adam_beta2: 0.95
+     adam_eps: 1.0e-08
+     name: adamW
+     torch_adam_is_fused: true
+   weight_decay: 0.01
+   zero_stage: 1
+ parallelism:
+   dp: 4
+   expert_parallel_size: 1
+   pp: 1
+   pp_engine: 1f1b
+   tp: 4
+   tp_linear_async_communication: false
+   tp_mode: ALL_REDUCE
+ profiler: null
+ tokenizer:
+   tokenizer_max_length: null
+   tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+   tokenizer_revision: null
+ tokens:
+   batch_accumulation_per_replica: 32
+   limit_test_batches: 0
+   limit_val_batches: 0
+   micro_batch_size: 16
+   sequence_length: 2048
+   train_steps: 100
+   val_check_interval: 100
configs/config_1.34G_dp4_tp64_pp1_acc16_mbs2_seq8192_zero1_tpmodeRED_vocab131k.yaml ADDED
@@ -0,0 +1,91 @@
+ checkpoints:
+   checkpoint_interval: 10000
+   checkpoints_path: checkpoints
+   checkpoints_path_is_shared_file_system: false
+   resume_checkpoint_path: null
+   save_initial_state: false
+ data_stages:
+ - data:
+     dataset: null
+     num_loading_workers: 1
+     seed: 42
+   name: Stable Training Stage
+   start_training_step: 1
+ general:
+   benchmark_csv_path: benchmark/results/bench_final.csv
+   consumed_train_samples: null
+   ignore_sanity_checks: true
+   project: debug
+   run: 1.34G_dp4_tp64_pp1_acc16_mbs2_seq8192_zero1_tpmodeRED_vocab131k
+   seed: 42
+   step: null
+ lighteval: null
+ logging:
+   iteration_step_info_interval: 1
+   log_level: info
+   log_level_replica: info
+ model:
+   ddp_bucket_cap_mb: 25
+   dtype: bfloat16
+   init_method:
+     std: 0.02
+   make_vocab_size_divisible_by: 1
+   model_config:
+     bos_token_id: 0
+     eos_token_id: 0
+     hidden_act: silu
+     hidden_size: 2048
+     initializer_range: 0.02
+     intermediate_size: 8192
+     is_llama_config: true
+     max_position_embeddings: 8192
+     num_attention_heads: 32
+     num_hidden_layers: 16
+     num_key_value_heads: 8
+     pad_token_id: null
+     pretraining_tp: 1
+     rms_norm_eps: 1.0e-05
+     rope_scaling: null
+     tie_word_embeddings: true
+     use_cache: true
+     vocab_size: 131072
+ optimizer:
+   accumulate_grad_in_fp32: true
+   clip_grad: 1.0
+   learning_rate_scheduler:
+     learning_rate: 0.0003
+     lr_decay_starting_step: null
+     lr_decay_steps: 13
+     lr_decay_style: cosine
+     lr_warmup_steps: 2
+     lr_warmup_style: linear
+     min_decay_lr: 1.0e-05
+   optimizer_factory:
+     adam_beta1: 0.9
+     adam_beta2: 0.95
+     adam_eps: 1.0e-08
+     name: adamW
+     torch_adam_is_fused: true
+   weight_decay: 0.01
+   zero_stage: 1
+ parallelism:
+   dp: 4
+   expert_parallel_size: 1
+   pp: 1
+   pp_engine: 1f1b
+   tp: 64
+   tp_linear_async_communication: true
+   tp_mode: REDUCE_SCATTER
+ profiler: null
+ tokenizer:
+   tokenizer_max_length: null
+   tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+   tokenizer_revision: null
+ tokens:
+   batch_accumulation_per_replica: 16
+   limit_test_batches: 0
+   limit_val_batches: 0
+   micro_batch_size: 2
+   sequence_length: 8192
+   train_steps: 100
+   val_check_interval: 100
configs/config_1.34G_dp4_tp64_pp1_acc8_mbs16_seq2048_zero1_tpmodeALL_vocab131k.yaml ADDED
@@ -0,0 +1,91 @@
+ checkpoints:
+   checkpoint_interval: 10000
+   checkpoints_path: checkpoints
+   checkpoints_path_is_shared_file_system: false
+   resume_checkpoint_path: null
+   save_initial_state: false
+ data_stages:
+ - data:
+     dataset: null
+     num_loading_workers: 1
+     seed: 42
+   name: Stable Training Stage
+   start_training_step: 1
+ general:
+   benchmark_csv_path: benchmark/results/bench_final.csv
+   consumed_train_samples: null
+   ignore_sanity_checks: true
+   project: debug
+   run: 1.34G_dp4_tp64_pp1_acc8_mbs16_seq2048_zero1_tpmodeALL_vocab131k
+   seed: 42
+   step: null
+ lighteval: null
+ logging:
+   iteration_step_info_interval: 1
+   log_level: info
+   log_level_replica: info
+ model:
+   ddp_bucket_cap_mb: 25
+   dtype: bfloat16
+   init_method:
+     std: 0.02
+   make_vocab_size_divisible_by: 1
+   model_config:
+     bos_token_id: 0
+     eos_token_id: 0
+     hidden_act: silu
+     hidden_size: 2048
+     initializer_range: 0.02
+     intermediate_size: 8192
+     is_llama_config: true
+     max_position_embeddings: 2048
+     num_attention_heads: 32
+     num_hidden_layers: 16
+     num_key_value_heads: 8
+     pad_token_id: null
+     pretraining_tp: 1
+     rms_norm_eps: 1.0e-05
+     rope_scaling: null
+     tie_word_embeddings: true
+     use_cache: true
+     vocab_size: 131072
+ optimizer:
+   accumulate_grad_in_fp32: true
+   clip_grad: 1.0
+   learning_rate_scheduler:
+     learning_rate: 0.0003
+     lr_decay_starting_step: null
+     lr_decay_steps: 13
+     lr_decay_style: cosine
+     lr_warmup_steps: 2
+     lr_warmup_style: linear
+     min_decay_lr: 1.0e-05
+   optimizer_factory:
+     adam_beta1: 0.9
+     adam_beta2: 0.95
+     adam_eps: 1.0e-08
+     name: adamW
+     torch_adam_is_fused: true
+   weight_decay: 0.01
+   zero_stage: 1
+ parallelism:
+   dp: 4
+   expert_parallel_size: 1
+   pp: 1
+   pp_engine: 1f1b
+   tp: 64
+   tp_linear_async_communication: false
+   tp_mode: ALL_REDUCE
+ profiler: null
+ tokenizer:
+   tokenizer_max_length: null
+   tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+   tokenizer_revision: null
+ tokens:
+   batch_accumulation_per_replica: 8
+   limit_test_batches: 0
+   limit_val_batches: 0
+   micro_batch_size: 16
+   sequence_length: 2048
+   train_steps: 100
+   val_check_interval: 100
configs/config_1.34G_dp64_tp1_pp2_acc2_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml ADDED
@@ -0,0 +1,91 @@
+ checkpoints:
+   checkpoint_interval: 10000
+   checkpoints_path: checkpoints
+   checkpoints_path_is_shared_file_system: false
+   resume_checkpoint_path: null
+   save_initial_state: false
+ data_stages:
+ - data:
+     dataset: null
+     num_loading_workers: 1
+     seed: 42
+   name: Stable Training Stage
+   start_training_step: 1
+ general:
+   benchmark_csv_path: benchmark/results/bench_final2.csv
+   consumed_train_samples: null
+   ignore_sanity_checks: true
+   project: debug
+   run: 1.34G_dp64_tp1_pp2_acc2_mbs2_seq4096_zero1_tpmodeRED_vocab131k
+   seed: 42
+   step: null
+ lighteval: null
+ logging:
+   iteration_step_info_interval: 1
+   log_level: info
+   log_level_replica: info
+ model:
+   ddp_bucket_cap_mb: 25
+   dtype: bfloat16
+   init_method:
+     std: 0.02
+   make_vocab_size_divisible_by: 1
+   model_config:
+     bos_token_id: 0
+     eos_token_id: 0
+     hidden_act: silu
+     hidden_size: 2048
+     initializer_range: 0.02
+     intermediate_size: 8192
+     is_llama_config: true
+     max_position_embeddings: 4096
+     num_attention_heads: 32
+     num_hidden_layers: 16
+     num_key_value_heads: 32
+     pad_token_id: null
+     pretraining_tp: 1
+     rms_norm_eps: 1.0e-05
+     rope_scaling: null
+     tie_word_embeddings: true
+     use_cache: true
+     vocab_size: 131072
+ optimizer:
+   accumulate_grad_in_fp32: true
+   clip_grad: 1.0
+   learning_rate_scheduler:
+     learning_rate: 0.0003
+     lr_decay_starting_step: null
+     lr_decay_steps: 13
+     lr_decay_style: cosine
+     lr_warmup_steps: 2
+     lr_warmup_style: linear
+     min_decay_lr: 1.0e-05
+   optimizer_factory:
+     adam_beta1: 0.9
+     adam_beta2: 0.95
+     adam_eps: 1.0e-08
+     name: adamW
+     torch_adam_is_fused: true
+   weight_decay: 0.01
+   zero_stage: 1
+ parallelism:
+   dp: 64
+   expert_parallel_size: 1
+   pp: 2
+   pp_engine: 1f1b
+   tp: 1
+   tp_linear_async_communication: true
+   tp_mode: REDUCE_SCATTER
+ profiler: null
+ tokenizer:
+   tokenizer_max_length: null
+   tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+   tokenizer_revision: null
+ tokens:
+   batch_accumulation_per_replica: 2
+   limit_test_batches: 0
+   limit_val_batches: 0
+   micro_batch_size: 2
+   sequence_length: 4096
+   train_steps: 100
+   val_check_interval: 100
configs/config_1.34G_dp8_tp2_pp16_acc32_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml ADDED
@@ -0,0 +1,91 @@
+ checkpoints:
+   checkpoint_interval: 10000
+   checkpoints_path: checkpoints
+   checkpoints_path_is_shared_file_system: false
+   resume_checkpoint_path: null
+   save_initial_state: false
+ data_stages:
+ - data:
+     dataset: null
+     num_loading_workers: 1
+     seed: 42
+   name: Stable Training Stage
+   start_training_step: 1
+ general:
+   benchmark_csv_path: benchmark/results/bench_final2.csv
+   consumed_train_samples: null
+   ignore_sanity_checks: true
+   project: debug
+   run: 1.34G_dp8_tp2_pp16_acc32_mbs1_seq4096_zero1_tpmodeRED_vocab131k
+   seed: 42
+   step: null
+ lighteval: null
+ logging:
+   iteration_step_info_interval: 1
+   log_level: info
+   log_level_replica: info
+ model:
+   ddp_bucket_cap_mb: 25
+   dtype: bfloat16
+   init_method:
+     std: 0.02
+   make_vocab_size_divisible_by: 1
+   model_config:
+     bos_token_id: 0
+     eos_token_id: 0
+     hidden_act: silu
+     hidden_size: 2048
+     initializer_range: 0.02
+     intermediate_size: 8192
+     is_llama_config: true
+     max_position_embeddings: 4096
+     num_attention_heads: 32
+     num_hidden_layers: 16
+     num_key_value_heads: 32
+     pad_token_id: null
+     pretraining_tp: 1
+     rms_norm_eps: 1.0e-05
+     rope_scaling: null
+     tie_word_embeddings: true
+     use_cache: true
+     vocab_size: 131072
+ optimizer:
+   accumulate_grad_in_fp32: true
+   clip_grad: 1.0
+   learning_rate_scheduler:
+     learning_rate: 0.0003
+     lr_decay_starting_step: null
+     lr_decay_steps: 13
+     lr_decay_style: cosine
+     lr_warmup_steps: 2
+     lr_warmup_style: linear
+     min_decay_lr: 1.0e-05
+   optimizer_factory:
+     adam_beta1: 0.9
+     adam_beta2: 0.95
+     adam_eps: 1.0e-08
+     name: adamW
+     torch_adam_is_fused: true
+   weight_decay: 0.01
+   zero_stage: 1
+ parallelism:
+   dp: 8
+   expert_parallel_size: 1
+   pp: 16
+   pp_engine: 1f1b
+   tp: 2
+   tp_linear_async_communication: true
+   tp_mode: REDUCE_SCATTER
+ profiler: null
+ tokenizer:
+   tokenizer_max_length: null
+   tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+   tokenizer_revision: null
+ tokens:
+   batch_accumulation_per_replica: 32
+   limit_test_batches: 0
+   limit_val_batches: 0
+   micro_batch_size: 1
+   sequence_length: 4096
+   train_steps: 100
+   val_check_interval: 100
configs/config_1.34G_dp8_tp4_pp1_acc1_mbs16_seq32768_zero1_tpmodeALL_vocab131k.yaml ADDED
@@ -0,0 +1,91 @@
+ checkpoints:
+   checkpoint_interval: 10000
+   checkpoints_path: checkpoints
+   checkpoints_path_is_shared_file_system: false
+   resume_checkpoint_path: null
+   save_initial_state: false
+ data_stages:
+ - data:
+     dataset: null
+     num_loading_workers: 1
+     seed: 42
+   name: Stable Training Stage
+   start_training_step: 1
+ general:
+   benchmark_csv_path: benchmark/results/bench_final.csv
+   consumed_train_samples: null
+   ignore_sanity_checks: true
+   project: debug
+   run: 1.34G_dp8_tp4_pp1_acc1_mbs16_seq32768_zero1_tpmodeALL_vocab131k
+   seed: 42
+   step: null
+ lighteval: null
+ logging:
+   iteration_step_info_interval: 1
+   log_level: info
+   log_level_replica: info
+ model:
+   ddp_bucket_cap_mb: 25
+   dtype: bfloat16
+   init_method:
+     std: 0.02
+   make_vocab_size_divisible_by: 1
+   model_config:
+     bos_token_id: 0
+     eos_token_id: 0
+     hidden_act: silu
+     hidden_size: 2048
+     initializer_range: 0.02
+     intermediate_size: 8192
+     is_llama_config: true
+     max_position_embeddings: 32768
+     num_attention_heads: 32
+     num_hidden_layers: 16
+     num_key_value_heads: 8
+     pad_token_id: null
+     pretraining_tp: 1
+     rms_norm_eps: 1.0e-05
+     rope_scaling: null
+     tie_word_embeddings: true
+     use_cache: true
+     vocab_size: 131072
+ optimizer:
+   accumulate_grad_in_fp32: true
+   clip_grad: 1.0
+   learning_rate_scheduler:
+     learning_rate: 0.0003
+     lr_decay_starting_step: null
+     lr_decay_steps: 13
+     lr_decay_style: cosine
+     lr_warmup_steps: 2
+     lr_warmup_style: linear
+     min_decay_lr: 1.0e-05
+   optimizer_factory:
+     adam_beta1: 0.9
+     adam_beta2: 0.95
+     adam_eps: 1.0e-08
+     name: adamW
+     torch_adam_is_fused: true
+   weight_decay: 0.01
+   zero_stage: 1
+ parallelism:
+   dp: 8
+   expert_parallel_size: 1
+   pp: 1
+   pp_engine: 1f1b
+   tp: 4
+   tp_linear_async_communication: false
+   tp_mode: ALL_REDUCE
+ profiler: null
+ tokenizer:
+   tokenizer_max_length: null
+   tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+   tokenizer_revision: null
+ tokens:
+   batch_accumulation_per_replica: 1
+   limit_test_batches: 0
+   limit_val_batches: 0
+   micro_batch_size: 16
+   sequence_length: 32768
+   train_steps: 100
+   val_check_interval: 100
configs/config_1.34G_dp8_tp8_pp1_acc2_mbs2_seq32768_zero1_tpmodeRED_vocab131k.yaml ADDED
@@ -0,0 +1,91 @@
+ checkpoints:
+   checkpoint_interval: 10000
+   checkpoints_path: checkpoints
+   checkpoints_path_is_shared_file_system: false
+   resume_checkpoint_path: null
+   save_initial_state: false
+ data_stages:
+ - data:
+     dataset: null
+     num_loading_workers: 1
+     seed: 42
+   name: Stable Training Stage
+   start_training_step: 1
+ general:
+   benchmark_csv_path: benchmark/results/bench_final.csv
+   consumed_train_samples: null
+   ignore_sanity_checks: true
+   project: debug
+   run: 1.34G_dp8_tp8_pp1_acc2_mbs2_seq32768_zero1_tpmodeRED_vocab131k
+   seed: 42
+   step: null
+ lighteval: null
+ logging:
+   iteration_step_info_interval: 1
+   log_level: info
+   log_level_replica: info
+ model:
+   ddp_bucket_cap_mb: 25
+   dtype: bfloat16
+   init_method:
+     std: 0.02
+   make_vocab_size_divisible_by: 1
+   model_config:
+     bos_token_id: 0
+     eos_token_id: 0
+     hidden_act: silu
+     hidden_size: 2048
+     initializer_range: 0.02
+     intermediate_size: 8192
+     is_llama_config: true
+     max_position_embeddings: 32768
+     num_attention_heads: 32
+     num_hidden_layers: 16
+     num_key_value_heads: 8
+     pad_token_id: null
+     pretraining_tp: 1
+     rms_norm_eps: 1.0e-05
+     rope_scaling: null
+     tie_word_embeddings: true
+     use_cache: true
+     vocab_size: 131072
+ optimizer:
+   accumulate_grad_in_fp32: true
+   clip_grad: 1.0
+   learning_rate_scheduler:
+     learning_rate: 0.0003
+     lr_decay_starting_step: null
+     lr_decay_steps: 13
+     lr_decay_style: cosine
+     lr_warmup_steps: 2
+     lr_warmup_style: linear
+     min_decay_lr: 1.0e-05
+   optimizer_factory:
+     adam_beta1: 0.9
+     adam_beta2: 0.95
+     adam_eps: 1.0e-08
+     name: adamW
+     torch_adam_is_fused: true
+   weight_decay: 0.01
+   zero_stage: 1
+ parallelism:
+   dp: 8
+   expert_parallel_size: 1
+   pp: 1
+   pp_engine: 1f1b
+   tp: 8
+   tp_linear_async_communication: true
+   tp_mode: REDUCE_SCATTER
+ profiler: null
+ tokenizer:
+   tokenizer_max_length: null
+   tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+   tokenizer_revision: null
+ tokens:
+   batch_accumulation_per_replica: 2
+   limit_test_batches: 0
+   limit_val_batches: 0
+   micro_batch_size: 2
+   sequence_length: 32768
+   train_steps: 100
+   val_check_interval: 100
configs/config_3.57G_dp1_tp16_pp1_acc1_mbs40_seq4096_zero0_tpmodeALL_vocab131k_cache.yaml ADDED
@@ -0,0 +1,91 @@
+ checkpoints:
+   checkpoint_interval: 10000
+   checkpoints_path: checkpoints
+   checkpoints_path_is_shared_file_system: false
+   resume_checkpoint_path: null
+   save_initial_state: false
+ data_stages:
+ - data:
+     dataset: null
+     num_loading_workers: 1
+     seed: 42
+   name: Stable Training Stage
+   start_training_step: 1
+ general:
+   benchmark_csv_path: benchmark/results/bench_final2.csv
+   consumed_train_samples: null
+   ignore_sanity_checks: true
+   project: debug
+   run: 3.57G_dp1_tp16_pp1_acc1_mbs40_seq4096_zero0_tpmodeALL_vocab131k_cache
+   seed: 42
+   step: null
+ lighteval: null
+ logging:
+   iteration_step_info_interval: 1
+   log_level: info
+   log_level_replica: info
+ model:
+   ddp_bucket_cap_mb: 25
+   dtype: bfloat16
+   init_method:
+     std: 0.02
+   make_vocab_size_divisible_by: 1
+   model_config:
+     bos_token_id: 0
+     eos_token_id: 0
+     hidden_act: silu
+     hidden_size: 3072
+     initializer_range: 0.02
+     intermediate_size: 8192
+     is_llama_config: true
+     max_position_embeddings: 4096
+     num_attention_heads: 32
+     num_hidden_layers: 28
+     num_key_value_heads: 32
+     pad_token_id: null
+     pretraining_tp: 1
+     rms_norm_eps: 1.0e-05
+     rope_scaling: null
+     tie_word_embeddings: true
+     use_cache: true
+     vocab_size: 131072
+ optimizer:
+   accumulate_grad_in_fp32: true
+   clip_grad: 1.0
+   learning_rate_scheduler:
+     learning_rate: 0.0003
+     lr_decay_starting_step: null
+     lr_decay_steps: 13
+     lr_decay_style: cosine
+     lr_warmup_steps: 2
+     lr_warmup_style: linear
+     min_decay_lr: 1.0e-05
+   optimizer_factory:
+     adam_beta1: 0.9
+     adam_beta2: 0.95
+     adam_eps: 1.0e-08
+     name: adamW
+     torch_adam_is_fused: true
+   weight_decay: 0.01
+   zero_stage: 0
+ parallelism:
+   dp: 1
+   expert_parallel_size: 1
+   pp: 1
+   pp_engine: 1f1b
+   tp: 16
+   tp_linear_async_communication: false
+   tp_mode: ALL_REDUCE
+ profiler: null
+ tokenizer:
+   tokenizer_max_length: null
+   tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+   tokenizer_revision: null
+ tokens:
+   batch_accumulation_per_replica: 1
+   limit_test_batches: 0
+   limit_val_batches: 0
+   micro_batch_size: 40
+   sequence_length: 4096
+   train_steps: 100
+   val_check_interval: 100
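Note: the "3.57G" in these file names is consistent with the larger model_config (hidden_size 3072, 28 layers). A back-of-the-envelope check, assuming a LLaMA-style block with a SwiGLU MLP, no biases, tied embeddings, and ignoring norm parameters as negligible:

def approx_params(m: dict) -> int:
    h, ffn = m["hidden_size"], m["intermediate_size"]
    attn = 4 * h * h              # q, k, v, o projections (kv heads == heads here)
    mlp = 3 * h * ffn             # gate, up, down
    emb = m["vocab_size"] * h     # counted once: tie_word_embeddings is true
    return m["num_hidden_layers"] * (attn + mlp) + emb

# 28 * (4*3072**2 + 3*3072*8192) + 131072*3072 = 3,573,547,008 ≈ 3.57G parameters.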
configs/config_3.57G_dp1_tp32_pp2_acc4_mbs64_seq4096_zero0_tpmodeRED_vocab131k.yaml ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+ checkpoints:
+   checkpoint_interval: 10000
+   checkpoints_path: checkpoints
+   checkpoints_path_is_shared_file_system: false
+   resume_checkpoint_path: null
+   save_initial_state: false
+ data_stages:
+ - data:
+     dataset: null
+     num_loading_workers: 1
+     seed: 42
+   name: Stable Training Stage
+   start_training_step: 1
+ general:
+   benchmark_csv_path: benchmark/results/bench_final2.csv
+   consumed_train_samples: null
+   ignore_sanity_checks: true
+   project: debug
+   run: 3.57G_dp1_tp32_pp2_acc4_mbs64_seq4096_zero0_tpmodeRED_vocab131k
+   seed: 42
+   step: null
+ lighteval: null
+ logging:
+   iteration_step_info_interval: 1
+   log_level: info
+   log_level_replica: info
+ model:
+   ddp_bucket_cap_mb: 25
+   dtype: bfloat16
+   init_method:
+     std: 0.02
+   make_vocab_size_divisible_by: 1
+   model_config:
+     bos_token_id: 0
+     eos_token_id: 0
+     hidden_act: silu
+     hidden_size: 3072
+     initializer_range: 0.02
+     intermediate_size: 8192
+     is_llama_config: true
+     max_position_embeddings: 4096
+     num_attention_heads: 32
+     num_hidden_layers: 28
+     num_key_value_heads: 32
+     pad_token_id: null
+     pretraining_tp: 1
+     rms_norm_eps: 1.0e-05
+     rope_scaling: null
+     tie_word_embeddings: true
+     use_cache: true
+     vocab_size: 131072
+ optimizer:
+   accumulate_grad_in_fp32: true
+   clip_grad: 1.0
+   learning_rate_scheduler:
+     learning_rate: 0.0003
+     lr_decay_starting_step: null
+     lr_decay_steps: 13
+     lr_decay_style: cosine
+     lr_warmup_steps: 2
+     lr_warmup_style: linear
+     min_decay_lr: 1.0e-05
+   optimizer_factory:
+     adam_beta1: 0.9
+     adam_beta2: 0.95
+     adam_eps: 1.0e-08
+     name: adamW
+     torch_adam_is_fused: true
+   weight_decay: 0.01
+   zero_stage: 0
+ parallelism:
+   dp: 1
+   expert_parallel_size: 1
+   pp: 2
+   pp_engine: 1f1b
+   tp: 32
+   tp_linear_async_communication: true
+   tp_mode: REDUCE_SCATTER
+ profiler: null
+ tokenizer:
+   tokenizer_max_length: null
+   tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+   tokenizer_revision: null
+ tokens:
+   batch_accumulation_per_replica: 4
+   limit_test_batches: 0
+   limit_val_batches: 0
+   micro_batch_size: 64
+   sequence_length: 4096
+   train_steps: 100
+   val_check_interval: 100
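
The filenames encode the same grid as the YAML bodies, which makes the sweep easy to index programmatically. A sketch of parsing that convention (the regex and field names are assumptions read off the names visible here; variants such as the `_cache` suffix or the `l16_h2048_heads32` shape configs would need extra handling):

```python
import re

# Assumed convention: config_<params>G_dp<d>_tp<t>_pp<p>_acc<a>_mbs<m>_seq<s>_zero<z>_tpmode<RED|ALL>_vocab<v>k
PATTERN = re.compile(
    r"config_(?P<size>[\d.]+)G_dp(?P<dp>\d+)_tp(?P<tp>\d+)_pp(?P<pp>\d+)"
    r"_acc(?P<acc>\d+)_mbs(?P<mbs>\d+)_seq(?P<seq>\d+)"
    r"_zero(?P<zero>\d+)_tpmode(?P<tpmode>RED|ALL)_vocab(?P<vocab>\d+)k"
)

m = PATTERN.search("config_3.57G_dp1_tp32_pp2_acc4_mbs64_seq4096_zero0_tpmodeRED_vocab131k.yaml")
assert m and m["tp"] == "32" and m["tpmode"] == "RED"  # RED maps to REDUCE_SCATTER in the YAML
```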
configs/config_3.57G_dp1_tp4_pp8_acc32_mbs8_seq4096_zero0_tpmodeRED_vocab131k.yaml ADDED
@@ -0,0 +1,91 @@
+ checkpoints:
+   checkpoint_interval: 10000
+   checkpoints_path: checkpoints
+   checkpoints_path_is_shared_file_system: false
+   resume_checkpoint_path: null
+   save_initial_state: false
+ data_stages:
+ - data:
+     dataset: null
+     num_loading_workers: 1
+     seed: 42
+   name: Stable Training Stage
+   start_training_step: 1
+ general:
+   benchmark_csv_path: benchmark/results/bench_final2.csv
+   consumed_train_samples: null
+   ignore_sanity_checks: true
+   project: debug
+   run: 3.57G_dp1_tp4_pp8_acc32_mbs8_seq4096_zero0_tpmodeRED_vocab131k
+   seed: 42
+   step: null
+ lighteval: null
+ logging:
+   iteration_step_info_interval: 1
+   log_level: info
+   log_level_replica: info
+ model:
+   ddp_bucket_cap_mb: 25
+   dtype: bfloat16
+   init_method:
+     std: 0.02
+   make_vocab_size_divisible_by: 1
+   model_config:
+     bos_token_id: 0
+     eos_token_id: 0
+     hidden_act: silu
+     hidden_size: 3072
+     initializer_range: 0.02
+     intermediate_size: 8192
+     is_llama_config: true
+     max_position_embeddings: 4096
+     num_attention_heads: 32
+     num_hidden_layers: 28
+     num_key_value_heads: 32
+     pad_token_id: null
+     pretraining_tp: 1
+     rms_norm_eps: 1.0e-05
+     rope_scaling: null
+     tie_word_embeddings: true
+     use_cache: true
+     vocab_size: 131072
+ optimizer:
+   accumulate_grad_in_fp32: true
+   clip_grad: 1.0
+   learning_rate_scheduler:
+     learning_rate: 0.0003
+     lr_decay_starting_step: null
+     lr_decay_steps: 13
+     lr_decay_style: cosine
+     lr_warmup_steps: 2
+     lr_warmup_style: linear
+     min_decay_lr: 1.0e-05
+   optimizer_factory:
+     adam_beta1: 0.9
+     adam_beta2: 0.95
+     adam_eps: 1.0e-08
+     name: adamW
+     torch_adam_is_fused: true
+   weight_decay: 0.01
+   zero_stage: 0
+ parallelism:
+   dp: 1
+   expert_parallel_size: 1
+   pp: 8
+   pp_engine: 1f1b
+   tp: 4
+   tp_linear_async_communication: true
+   tp_mode: REDUCE_SCATTER
+ profiler: null
+ tokenizer:
+   tokenizer_max_length: null
+   tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+   tokenizer_revision: null
+ tokens:
+   batch_accumulation_per_replica: 32
+   limit_test_batches: 0
+   limit_val_batches: 0
+   micro_batch_size: 8
+   sequence_length: 4096
+   train_steps: 100
+   val_check_interval: 100
configs/config_3.57G_dp1_tp8_pp1_acc1_mbs26_seq4096_zero0_tpmodeRED_vocab131k_cache.yaml ADDED
@@ -0,0 +1,91 @@
+ checkpoints:
+   checkpoint_interval: 10000
+   checkpoints_path: checkpoints
+   checkpoints_path_is_shared_file_system: false
+   resume_checkpoint_path: null
+   save_initial_state: false
+ data_stages:
+ - data:
+     dataset: null
+     num_loading_workers: 1
+     seed: 42
+   name: Stable Training Stage
+   start_training_step: 1
+ general:
+   benchmark_csv_path: benchmark/results/bench_final2.csv
+   consumed_train_samples: null
+   ignore_sanity_checks: true
+   project: debug
+   run: 3.57G_dp1_tp8_pp1_acc1_mbs26_seq4096_zero0_tpmodeRED_vocab131k_cache
+   seed: 42
+   step: null
+ lighteval: null
+ logging:
+   iteration_step_info_interval: 1
+   log_level: info
+   log_level_replica: info
+ model:
+   ddp_bucket_cap_mb: 25
+   dtype: bfloat16
+   init_method:
+     std: 0.02
+   make_vocab_size_divisible_by: 1
+   model_config:
+     bos_token_id: 0
+     eos_token_id: 0
+     hidden_act: silu
+     hidden_size: 3072
+     initializer_range: 0.02
+     intermediate_size: 8192
+     is_llama_config: true
+     max_position_embeddings: 4096
+     num_attention_heads: 32
+     num_hidden_layers: 28
+     num_key_value_heads: 32
+     pad_token_id: null
+     pretraining_tp: 1
+     rms_norm_eps: 1.0e-05
+     rope_scaling: null
+     tie_word_embeddings: true
+     use_cache: true
+     vocab_size: 131072
+ optimizer:
+   accumulate_grad_in_fp32: true
+   clip_grad: 1.0
+   learning_rate_scheduler:
+     learning_rate: 0.0003
+     lr_decay_starting_step: null
+     lr_decay_steps: 13
+     lr_decay_style: cosine
+     lr_warmup_steps: 2
+     lr_warmup_style: linear
+     min_decay_lr: 1.0e-05
+   optimizer_factory:
+     adam_beta1: 0.9
+     adam_beta2: 0.95
+     adam_eps: 1.0e-08
+     name: adamW
+     torch_adam_is_fused: true
+   weight_decay: 0.01
+   zero_stage: 0
+ parallelism:
+   dp: 1
+   expert_parallel_size: 1
+   pp: 1
+   pp_engine: 1f1b
+   tp: 8
+   tp_linear_async_communication: true
+   tp_mode: REDUCE_SCATTER
+ profiler: null
+ tokenizer:
+   tokenizer_max_length: null
+   tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+   tokenizer_revision: null
+ tokens:
+   batch_accumulation_per_replica: 1
+   limit_test_batches: 0
+   limit_val_batches: 0
+   micro_batch_size: 26
+   sequence_length: 4096
+   train_steps: 100
+   val_check_interval: 100
configs/config_3.57G_dp1_tp8_pp1_acc1_mbs2_seq4096_zero0_tpmodeRED_vocab131k_cache.yaml ADDED
@@ -0,0 +1,91 @@
+ checkpoints:
+   checkpoint_interval: 10000
+   checkpoints_path: checkpoints
+   checkpoints_path_is_shared_file_system: false
+   resume_checkpoint_path: null
+   save_initial_state: false
+ data_stages:
+ - data:
+     dataset: null
+     num_loading_workers: 1
+     seed: 42
+   name: Stable Training Stage
+   start_training_step: 1
+ general:
+   benchmark_csv_path: benchmark/results/bench_final2.csv
+   consumed_train_samples: null
+   ignore_sanity_checks: true
+   project: debug
+   run: 3.57G_dp1_tp8_pp1_acc1_mbs2_seq4096_zero0_tpmodeRED_vocab131k_cache
+   seed: 42
+   step: null
+ lighteval: null
+ logging:
+   iteration_step_info_interval: 1
+   log_level: info
+   log_level_replica: info
+ model:
+   ddp_bucket_cap_mb: 25
+   dtype: bfloat16
+   init_method:
+     std: 0.02
+   make_vocab_size_divisible_by: 1
+   model_config:
+     bos_token_id: 0
+     eos_token_id: 0
+     hidden_act: silu
+     hidden_size: 3072
+     initializer_range: 0.02
+     intermediate_size: 8192
+     is_llama_config: true
+     max_position_embeddings: 4096
+     num_attention_heads: 32
+     num_hidden_layers: 28
+     num_key_value_heads: 32
+     pad_token_id: null
+     pretraining_tp: 1
+     rms_norm_eps: 1.0e-05
+     rope_scaling: null
+     tie_word_embeddings: true
+     use_cache: true
+     vocab_size: 131072
+ optimizer:
+   accumulate_grad_in_fp32: true
+   clip_grad: 1.0
+   learning_rate_scheduler:
+     learning_rate: 0.0003
+     lr_decay_starting_step: null
+     lr_decay_steps: 13
+     lr_decay_style: cosine
+     lr_warmup_steps: 2
+     lr_warmup_style: linear
+     min_decay_lr: 1.0e-05
+   optimizer_factory:
+     adam_beta1: 0.9
+     adam_beta2: 0.95
+     adam_eps: 1.0e-08
+     name: adamW
+     torch_adam_is_fused: true
+   weight_decay: 0.01
+   zero_stage: 0
+ parallelism:
+   dp: 1
+   expert_parallel_size: 1
+   pp: 1
+   pp_engine: 1f1b
+   tp: 8
+   tp_linear_async_communication: true
+   tp_mode: REDUCE_SCATTER
+ profiler: null
+ tokenizer:
+   tokenizer_max_length: null
+   tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+   tokenizer_revision: null
+ tokens:
+   batch_accumulation_per_replica: 1
+   limit_test_batches: 0
+   limit_val_batches: 0
+   micro_batch_size: 2
+   sequence_length: 4096
+   train_steps: 100
+   val_check_interval: 100
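
The two `_cache` configs above differ only in micro_batch_size (26 vs. 2); everything else, including the dp x tp x pp product, is identical. A quick way to sanity-check how many ranks a given config expects, using plain PyYAML rather than the repo's own loader (which I have not assumed here):

```python
import yaml  # PyYAML

path = "configs/config_3.57G_dp1_tp8_pp1_acc1_mbs2_seq4096_zero0_tpmodeRED_vocab131k_cache.yaml"
with open(path) as f:
    cfg = yaml.safe_load(f)

par = cfg["parallelism"]
world_size = par["dp"] * par["tp"] * par["pp"]  # expert_parallel_size is 1 throughout this sweep
print(world_size)  # -> 8 ranks for this config
```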
configs/config_3.57G_dp2_tp1_pp16_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml ADDED
@@ -0,0 +1,91 @@
+ checkpoints:
+   checkpoint_interval: 10000
+   checkpoints_path: checkpoints
+   checkpoints_path_is_shared_file_system: false
+   resume_checkpoint_path: null
+   save_initial_state: false
+ data_stages:
+ - data:
+     dataset: null
+     num_loading_workers: 1
+     seed: 42
+   name: Stable Training Stage
+   start_training_step: 1
+ general:
+   benchmark_csv_path: benchmark/results/bench_final2.csv
+   consumed_train_samples: null
+   ignore_sanity_checks: true
+   project: debug
+   run: 3.57G_dp2_tp1_pp16_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k
+   seed: 42
+   step: null
+ lighteval: null
+ logging:
+   iteration_step_info_interval: 1
+   log_level: info
+   log_level_replica: info
+ model:
+   ddp_bucket_cap_mb: 25
+   dtype: bfloat16
+   init_method:
+     std: 0.02
+   make_vocab_size_divisible_by: 1
+   model_config:
+     bos_token_id: 0
+     eos_token_id: 0
+     hidden_act: silu
+     hidden_size: 3072
+     initializer_range: 0.02
+     intermediate_size: 8192
+     is_llama_config: true
+     max_position_embeddings: 4096
+     num_attention_heads: 32
+     num_hidden_layers: 28
+     num_key_value_heads: 32
+     pad_token_id: null
+     pretraining_tp: 1
+     rms_norm_eps: 1.0e-05
+     rope_scaling: null
+     tie_word_embeddings: true
+     use_cache: true
+     vocab_size: 131072
+ optimizer:
+   accumulate_grad_in_fp32: true
+   clip_grad: 1.0
+   learning_rate_scheduler:
+     learning_rate: 0.0003
+     lr_decay_starting_step: null
+     lr_decay_steps: 13
+     lr_decay_style: cosine
+     lr_warmup_steps: 2
+     lr_warmup_style: linear
+     min_decay_lr: 1.0e-05
+   optimizer_factory:
+     adam_beta1: 0.9
+     adam_beta2: 0.95
+     adam_eps: 1.0e-08
+     name: adamW
+     torch_adam_is_fused: true
+   weight_decay: 0.01
+   zero_stage: 0
+ parallelism:
+   dp: 2
+   expert_parallel_size: 1
+   pp: 16
+   pp_engine: 1f1b
+   tp: 1
+   tp_linear_async_communication: true
+   tp_mode: REDUCE_SCATTER
+ profiler: null
+ tokenizer:
+   tokenizer_max_length: null
+   tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+   tokenizer_revision: null
+ tokens:
+   batch_accumulation_per_replica: 128
+   limit_test_batches: 0
+   limit_val_batches: 0
+   micro_batch_size: 1
+   sequence_length: 4096
+   train_steps: 100
+   val_check_interval: 100
configs/config_3.57G_dp2_tp1_pp4_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml ADDED
@@ -0,0 +1,91 @@
+ checkpoints:
+   checkpoint_interval: 10000
+   checkpoints_path: checkpoints
+   checkpoints_path_is_shared_file_system: false
+   resume_checkpoint_path: null
+   save_initial_state: false
+ data_stages:
+ - data:
+     dataset: null
+     num_loading_workers: 1
+     seed: 42
+   name: Stable Training Stage
+   start_training_step: 1
+ general:
+   benchmark_csv_path: benchmark/results/bench_final2.csv
+   consumed_train_samples: null
+   ignore_sanity_checks: true
+   project: debug
+   run: 3.57G_dp2_tp1_pp4_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k
+   seed: 42
+   step: null
+ lighteval: null
+ logging:
+   iteration_step_info_interval: 1
+   log_level: info
+   log_level_replica: info
+ model:
+   ddp_bucket_cap_mb: 25
+   dtype: bfloat16
+   init_method:
+     std: 0.02
+   make_vocab_size_divisible_by: 1
+   model_config:
+     bos_token_id: 0
+     eos_token_id: 0
+     hidden_act: silu
+     hidden_size: 3072
+     initializer_range: 0.02
+     intermediate_size: 8192
+     is_llama_config: true
+     max_position_embeddings: 4096
+     num_attention_heads: 32
+     num_hidden_layers: 28
+     num_key_value_heads: 32
+     pad_token_id: null
+     pretraining_tp: 1
+     rms_norm_eps: 1.0e-05
+     rope_scaling: null
+     tie_word_embeddings: true
+     use_cache: true
+     vocab_size: 131072
+ optimizer:
+   accumulate_grad_in_fp32: true
+   clip_grad: 1.0
+   learning_rate_scheduler:
+     learning_rate: 0.0003
+     lr_decay_starting_step: null
+     lr_decay_steps: 13
+     lr_decay_style: cosine
+     lr_warmup_steps: 2
+     lr_warmup_style: linear
+     min_decay_lr: 1.0e-05
+   optimizer_factory:
+     adam_beta1: 0.9
+     adam_beta2: 0.95
+     adam_eps: 1.0e-08
+     name: adamW
+     torch_adam_is_fused: true
+   weight_decay: 0.01
+   zero_stage: 1
+ parallelism:
+   dp: 2
+   expert_parallel_size: 1
+   pp: 4
+   pp_engine: 1f1b
+   tp: 1
+   tp_linear_async_communication: true
+   tp_mode: REDUCE_SCATTER
+ profiler: null
+ tokenizer:
+   tokenizer_max_length: null
+   tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+   tokenizer_revision: null
+ tokens:
+   batch_accumulation_per_replica: 8
+   limit_test_batches: 0
+   limit_val_batches: 0
+   micro_batch_size: 16
+   sequence_length: 4096
+   train_steps: 100
+   val_check_interval: 100
configs/config_3.57G_dp2_tp4_pp2_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml ADDED
@@ -0,0 +1,91 @@
+ checkpoints:
+   checkpoint_interval: 10000
+   checkpoints_path: checkpoints
+   checkpoints_path_is_shared_file_system: false
+   resume_checkpoint_path: null
+   save_initial_state: false
+ data_stages:
+ - data:
+     dataset: null
+     num_loading_workers: 1
+     seed: 42
+   name: Stable Training Stage
+   start_training_step: 1
+ general:
+   benchmark_csv_path: benchmark/results/bench_final2.csv
+   consumed_train_samples: null
+   ignore_sanity_checks: true
+   project: debug
+   run: 3.57G_dp2_tp4_pp2_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k
+   seed: 42
+   step: null
+ lighteval: null
+ logging:
+   iteration_step_info_interval: 1
+   log_level: info
+   log_level_replica: info
+ model:
+   ddp_bucket_cap_mb: 25
+   dtype: bfloat16
+   init_method:
+     std: 0.02
+   make_vocab_size_divisible_by: 1
+   model_config:
+     bos_token_id: 0
+     eos_token_id: 0
+     hidden_act: silu
+     hidden_size: 3072
+     initializer_range: 0.02
+     intermediate_size: 8192
+     is_llama_config: true
+     max_position_embeddings: 4096
+     num_attention_heads: 32
+     num_hidden_layers: 28
+     num_key_value_heads: 32
+     pad_token_id: null
+     pretraining_tp: 1
+     rms_norm_eps: 1.0e-05
+     rope_scaling: null
+     tie_word_embeddings: true
+     use_cache: true
+     vocab_size: 131072
+ optimizer:
+   accumulate_grad_in_fp32: true
+   clip_grad: 1.0
+   learning_rate_scheduler:
+     learning_rate: 0.0003
+     lr_decay_starting_step: null
+     lr_decay_steps: 13
+     lr_decay_style: cosine
+     lr_warmup_steps: 2
+     lr_warmup_style: linear
+     min_decay_lr: 1.0e-05
+   optimizer_factory:
+     adam_beta1: 0.9
+     adam_beta2: 0.95
+     adam_eps: 1.0e-08
+     name: adamW
+     torch_adam_is_fused: true
+   weight_decay: 0.01
+   zero_stage: 1
+ parallelism:
+   dp: 2
+   expert_parallel_size: 1
+   pp: 2
+   pp_engine: 1f1b
+   tp: 4
+   tp_linear_async_communication: true
+   tp_mode: REDUCE_SCATTER
+ profiler: null
+ tokenizer:
+   tokenizer_max_length: null
+   tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+   tokenizer_revision: null
+ tokens:
+   batch_accumulation_per_replica: 32
+   limit_test_batches: 0
+   limit_val_batches: 0
+   micro_batch_size: 4
+   sequence_length: 4096
+   train_steps: 100
+   val_check_interval: 100
configs/config_3.57G_dp4_tp16_pp4_acc4_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml ADDED
@@ -0,0 +1,91 @@
+ checkpoints:
+   checkpoint_interval: 10000
+   checkpoints_path: checkpoints
+   checkpoints_path_is_shared_file_system: false
+   resume_checkpoint_path: null
+   save_initial_state: false
+ data_stages:
+ - data:
+     dataset: null
+     num_loading_workers: 1
+     seed: 42
+   name: Stable Training Stage
+   start_training_step: 1
+ general:
+   benchmark_csv_path: benchmark/results/bench_final2.csv
+   consumed_train_samples: null
+   ignore_sanity_checks: true
+   project: debug
+   run: 3.57G_dp4_tp16_pp4_acc4_mbs16_seq4096_zero1_tpmodeRED_vocab131k
+   seed: 42
+   step: null
+ lighteval: null
+ logging:
+   iteration_step_info_interval: 1
+   log_level: info
+   log_level_replica: info
+ model:
+   ddp_bucket_cap_mb: 25
+   dtype: bfloat16
+   init_method:
+     std: 0.02
+   make_vocab_size_divisible_by: 1
+   model_config:
+     bos_token_id: 0
+     eos_token_id: 0
+     hidden_act: silu
+     hidden_size: 3072
+     initializer_range: 0.02
+     intermediate_size: 8192
+     is_llama_config: true
+     max_position_embeddings: 4096
+     num_attention_heads: 32
+     num_hidden_layers: 28
+     num_key_value_heads: 32
+     pad_token_id: null
+     pretraining_tp: 1
+     rms_norm_eps: 1.0e-05
+     rope_scaling: null
+     tie_word_embeddings: true
+     use_cache: true
+     vocab_size: 131072
+ optimizer:
+   accumulate_grad_in_fp32: true
+   clip_grad: 1.0
+   learning_rate_scheduler:
+     learning_rate: 0.0003
+     lr_decay_starting_step: null
+     lr_decay_steps: 13
+     lr_decay_style: cosine
+     lr_warmup_steps: 2
+     lr_warmup_style: linear
+     min_decay_lr: 1.0e-05
+   optimizer_factory:
+     adam_beta1: 0.9
+     adam_beta2: 0.95
+     adam_eps: 1.0e-08
+     name: adamW
+     torch_adam_is_fused: true
+   weight_decay: 0.01
+   zero_stage: 1
+ parallelism:
+   dp: 4
+   expert_parallel_size: 1
+   pp: 4
+   pp_engine: 1f1b
+   tp: 16
+   tp_linear_async_communication: true
+   tp_mode: REDUCE_SCATTER
+ profiler: null
+ tokenizer:
+   tokenizer_max_length: null
+   tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+   tokenizer_revision: null
+ tokens:
+   batch_accumulation_per_replica: 4
+   limit_test_batches: 0
+   limit_val_batches: 0
+   micro_batch_size: 16
+   sequence_length: 4096
+   train_steps: 100
+   val_check_interval: 100
configs/config_3.57G_dp4_tp32_pp2_acc1_mbs64_seq4096_zero1_tpmodeRED_vocab131k.yaml ADDED
@@ -0,0 +1,91 @@
+ checkpoints:
+   checkpoint_interval: 10000
+   checkpoints_path: checkpoints
+   checkpoints_path_is_shared_file_system: false
+   resume_checkpoint_path: null
+   save_initial_state: false
+ data_stages:
+ - data:
+     dataset: null
+     num_loading_workers: 1
+     seed: 42
+   name: Stable Training Stage
+   start_training_step: 1
+ general:
+   benchmark_csv_path: benchmark/results/bench_final2.csv
+   consumed_train_samples: null
+   ignore_sanity_checks: true
+   project: debug
+   run: 3.57G_dp4_tp32_pp2_acc1_mbs64_seq4096_zero1_tpmodeRED_vocab131k
+   seed: 42
+   step: null
+ lighteval: null
+ logging:
+   iteration_step_info_interval: 1
+   log_level: info
+   log_level_replica: info
+ model:
+   ddp_bucket_cap_mb: 25
+   dtype: bfloat16
+   init_method:
+     std: 0.02
+   make_vocab_size_divisible_by: 1
+   model_config:
+     bos_token_id: 0
+     eos_token_id: 0
+     hidden_act: silu
+     hidden_size: 3072
+     initializer_range: 0.02
+     intermediate_size: 8192
+     is_llama_config: true
+     max_position_embeddings: 4096
+     num_attention_heads: 32
+     num_hidden_layers: 28
+     num_key_value_heads: 32
+     pad_token_id: null
+     pretraining_tp: 1
+     rms_norm_eps: 1.0e-05
+     rope_scaling: null
+     tie_word_embeddings: true
+     use_cache: true
+     vocab_size: 131072
+ optimizer:
+   accumulate_grad_in_fp32: true
+   clip_grad: 1.0
+   learning_rate_scheduler:
+     learning_rate: 0.0003
+     lr_decay_starting_step: null
+     lr_decay_steps: 13
+     lr_decay_style: cosine
+     lr_warmup_steps: 2
+     lr_warmup_style: linear
+     min_decay_lr: 1.0e-05
+   optimizer_factory:
+     adam_beta1: 0.9
+     adam_beta2: 0.95
+     adam_eps: 1.0e-08
+     name: adamW
+     torch_adam_is_fused: true
+   weight_decay: 0.01
+   zero_stage: 1
+ parallelism:
+   dp: 4
+   expert_parallel_size: 1
+   pp: 2
+   pp_engine: 1f1b
+   tp: 32
+   tp_linear_async_communication: true
+   tp_mode: REDUCE_SCATTER
+ profiler: null
+ tokenizer:
+   tokenizer_max_length: null
+   tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+   tokenizer_revision: null
+ tokens:
+   batch_accumulation_per_replica: 1
+   limit_test_batches: 0
+   limit_val_batches: 0
+   micro_batch_size: 64
+   sequence_length: 4096
+   train_steps: 100
+   val_check_interval: 100
configs/config_3.57G_dp64_tp4_pp1_acc2_mbs32_seq4096_zero0_tpmodeRED_vocab131k.yaml ADDED
@@ -0,0 +1,91 @@
+ checkpoints:
+   checkpoint_interval: 10000
+   checkpoints_path: checkpoints
+   checkpoints_path_is_shared_file_system: false
+   resume_checkpoint_path: null
+   save_initial_state: false
+ data_stages:
+ - data:
+     dataset: null
+     num_loading_workers: 1
+     seed: 42
+   name: Stable Training Stage
+   start_training_step: 1
+ general:
+   benchmark_csv_path: benchmark/results/bench_final2.csv
+   consumed_train_samples: null
+   ignore_sanity_checks: true
+   project: debug
+   run: 3.57G_dp64_tp4_pp1_acc2_mbs32_seq4096_zero0_tpmodeRED_vocab131k
+   seed: 42
+   step: null
+ lighteval: null
+ logging:
+   iteration_step_info_interval: 1
+   log_level: info
+   log_level_replica: info
+ model:
+   ddp_bucket_cap_mb: 25
+   dtype: bfloat16
+   init_method:
+     std: 0.02
+   make_vocab_size_divisible_by: 1
+   model_config:
+     bos_token_id: 0
+     eos_token_id: 0
+     hidden_act: silu
+     hidden_size: 3072
+     initializer_range: 0.02
+     intermediate_size: 8192
+     is_llama_config: true
+     max_position_embeddings: 4096
+     num_attention_heads: 32
+     num_hidden_layers: 28
+     num_key_value_heads: 32
+     pad_token_id: null
+     pretraining_tp: 1
+     rms_norm_eps: 1.0e-05
+     rope_scaling: null
+     tie_word_embeddings: true
+     use_cache: true
+     vocab_size: 131072
+ optimizer:
+   accumulate_grad_in_fp32: true
+   clip_grad: 1.0
+   learning_rate_scheduler:
+     learning_rate: 0.0003
+     lr_decay_starting_step: null
+     lr_decay_steps: 13
+     lr_decay_style: cosine
+     lr_warmup_steps: 2
+     lr_warmup_style: linear
+     min_decay_lr: 1.0e-05
+   optimizer_factory:
+     adam_beta1: 0.9
+     adam_beta2: 0.95
+     adam_eps: 1.0e-08
+     name: adamW
+     torch_adam_is_fused: true
+   weight_decay: 0.01
+   zero_stage: 0
+ parallelism:
+   dp: 64
+   expert_parallel_size: 1
+   pp: 1
+   pp_engine: 1f1b
+   tp: 4
+   tp_linear_async_communication: true
+   tp_mode: REDUCE_SCATTER
+ profiler: null
+ tokenizer:
+   tokenizer_max_length: null
+   tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+   tokenizer_revision: null
+ tokens:
+   batch_accumulation_per_replica: 2
+   limit_test_batches: 0
+   limit_val_batches: 0
+   micro_batch_size: 32
+   sequence_length: 4096
+   train_steps: 100
+   val_check_interval: 100
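
Every file in this commit shares the same scheduler block: linear warmup for 2 steps, then cosine decay over 13 steps down to min_decay_lr=1.0e-05, well inside the 100 train_steps. A sketch of what those fields plausibly evaluate to step by step (my reading of the YAML, not necessarily the trainer's exact indexing):

```python
import math

def lr_at(step: int, lr: float = 3e-4, warmup: int = 2, decay: int = 13, min_lr: float = 1e-5) -> float:
    """Linear warmup, cosine decay to min_lr, then flat (assumed indexing)."""
    if step < warmup:
        return lr * (step + 1) / warmup
    t = min(step - warmup, decay) / decay  # progress through the decay window
    return min_lr + 0.5 * (lr - min_lr) * (1 + math.cos(math.pi * t))

assert abs(lr_at(0) - 1.5e-4) < 1e-12  # halfway through warmup
assert abs(lr_at(99) - 1e-5) < 1e-12   # fully decayed long before train_steps=100
```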