mtasic85 committed

Commit 374df08 · 1 Parent(s): 98b9040

pretrain_core_model_1

Files changed (3)
  1. README.md +8 -0
  2. config-1.json +29 -0
  3. scripts/pretrain_core_model_1.yaml +154 -0
README.md CHANGED
@@ -196,3 +196,11 @@ CUDA_VISIBLE_DEVICES=0 CUDA_LAUNCH_BLOCKING=0 PYTORCH_CUDA_ALLOC_CONF=expandable
  | - leaderboard_musr_object_placements | 1|none | 0|acc_norm |↑ |0.2227|± |0.0261|
  | - leaderboard_musr_team_allocation | 1|none | 0|acc_norm |↑ |0.3960|± |0.0310|
  ```
+
+ ```bash
+ litgpt convert_pretrained_checkpoint ../out/pretrain-core-0/final ../out/pretrain-core-0/checkpoint
+ ```
+
+ ```bash
+ CUDA_VISIBLE_DEVICES=0 CUDA_LAUNCH_BLOCKING=0 PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True litgpt pretrain --config pretrain_core_model_1.yaml
+ ```
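The first command consolidates the stage-0 run into `../out/pretrain-core-0/checkpoint`, which the stage-1 YAML below points at via `initial_checkpoint_dir`. A minimal pre-flight check before launching the second command could look like the sketch below; the path is taken from the commands above, while the check itself is only a convenience, not part of the commit.

```python
# Sketch: verify the converted stage-0 checkpoint exists before launching
# stage 1. The directory path comes from the commands above; no assumptions
# are made about the specific file names inside it, only that the
# conversion wrote something into the directory.
from pathlib import Path

ckpt_dir = Path("../out/pretrain-core-0/checkpoint")
files = list(ckpt_dir.iterdir()) if ckpt_dir.is_dir() else []

if not files:
    raise SystemExit(f"run `litgpt convert_pretrained_checkpoint` first: {ckpt_dir} is missing or empty")
print(f"found {len(files)} files in {ckpt_dir}, ready to start pretrain-core-1")
```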
config-1.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 0,
+   "eos_token_id": 1,
+   "head_dim": 64,
+   "hidden_act": "silu",
+   "hidden_size": 512,
+   "initializer_range": 0.02,
+   "intermediate_size": 1365,
+   "max_position_embeddings": 131072,
+   "mlp_bias": false,
+   "model_type": "llama",
+   "num_attention_heads": 8,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 8,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "rope_theta": 16000.0,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.45.0.dev0",
+   "use_cache": true,
+   "vocab_size": 131072
+ }
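For orientation, the shape fields in `config-1.json` imply a model of roughly 235M parameters. The arithmetic below is a back-of-the-envelope sketch (bias-free, untied embeddings, standard Llama-style blocks); it is not a number reported in the commit.

```python
# Rough parameter count implied by config-1.json (Llama layout, no biases,
# untied embeddings). Back-of-the-envelope only.
import json

with open("config-1.json") as f:
    cfg = json.load(f)

h = cfg["hidden_size"]            # 512
L = cfg["num_hidden_layers"]      # 32
i = cfg["intermediate_size"]      # 1365
v = cfg["vocab_size"]             # 131072

embed = v * h                                  # token embeddings
lm_head = 0 if cfg["tie_word_embeddings"] else v * h
attn = 4 * h * h                               # q, k, v, o (8 heads == 8 kv heads, head_dim 64)
mlp = 3 * h * i                                # gate, up, down projections (SiLU MLP)
norms = 2 * h                                  # two RMSNorms per block
total = embed + lm_head + L * (attn + mlp + norms) + h  # + final norm

print(f"~{total / 1e6:.0f}M parameters")       # ~235M
```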
scripts/pretrain_core_model_1.yaml ADDED
@@ -0,0 +1,154 @@
+ # The name of the model to pretrain. Choose from names in ``litgpt.config``. Mutually exclusive with
+ # ``model_config``. (type: Optional[str], default: null)
+ model_name: 'tangled-alpha-0.11-core'
+
+ # A ``litgpt.Config`` object to define the model architecture. Mutually exclusive with
+ # ``model_name``. (type: Optional[Config], default: null)
+ model_config:
+   name: 'tangled-alpha-0.11-core'
+   block_size: 131072
+   vocab_size: 131072
+   padded_vocab_size: 131072
+   n_layer: 32
+   n_head: 8
+   n_embd: 512
+   n_query_groups: 8
+   rotary_percentage: 1.0
+   parallel_residual: False
+   bias: False
+   norm_class_name: "RMSNorm"
+   mlp_class_name: "LLaMAMLP"
+   intermediate_size: 1365 # n_embd * 2.666
+   norm_eps: 1e-5
+   rope_base: 16000 # https://arxiv.org/pdf/2405.14591
+   head_size: 64 # n_embd / n_head
+
+ # Directory in which to save checkpoints and logs. If running in a Lightning Studio Job, look for it in
+ # /teamspace/jobs/<job-name>/share. (type: <class 'Path'>, default: out/pretrain)
+ out_dir: "../out/pretrain-core-1/"
+
+ # The precision to use for pretraining. Possible choices: "bf16-true", "bf16-mixed", "32-true". (type: Optional[str], default: null)
+ # precision: bf16-mixed
+ precision: bf16-true
+
+ # Optional path to a checkpoint directory to initialize the model from.
+ # Useful for continued pretraining. Mutually exclusive with ``resume``. (type: Optional[Path], default: null)
+ initial_checkpoint_dir: "../out/pretrain-core-0/checkpoint"
+
+ # Path to a checkpoint directory to resume from in case training was interrupted, or ``True`` to resume
+ # from the latest checkpoint in ``out_dir``. An error will be raised if no checkpoint is found. Passing
+ # ``'auto'`` will resume from the latest checkpoint but not error if no checkpoint exists.
+ # (type: Union[bool, Literal["auto"], Path], default: False)
+ resume:
+
+ # Data-related arguments. If not provided, the default is ``litgpt.data.TinyLlama``.
+ data:
+   class_path: LitData
+
+   init_args:
+     data_path: "../core-data-1-1025-2049-2049-8000/"
+     num_workers: 32
+
+ # Training-related arguments. See ``litgpt.args.TrainArgs`` for details
+ train:
+   # Number of optimizer steps between saving checkpoints (type: Optional[int], default: 1000)
+   save_interval: 50
+
+   # Number of iterations between logging calls (type: int, default: 1)
+   log_interval: 1
+
+   # Number of samples between optimizer steps across data-parallel ranks (type: int, default: 512)
+   global_batch_size: 512
+
+   # Number of samples per data-parallel rank (type: int, default: 4)
+   micro_batch_size: 4
+
+   # Number of iterations with learning rate warmup active (type: int, default: 2000)
+   lr_warmup_steps: 0
+
+   # Number of epochs to train on (type: Optional[int], default: null)
+   epochs:
+
+   # Total number of tokens to train on (type: Optional[int], default: 3000000000000)
+   max_tokens: 1830709785
+
+   # Limits the number of optimizer steps to run. (type: Optional[int], default: null)
+   max_steps:
+
+   # Limits the length of samples. Off by default (type: Optional[int], default: null)
+   max_seq_length: 2049
+
+   # Whether to tie the embedding weights with the language modeling head weights. (type: Optional[bool], default: False)
+   tie_embeddings: false
+
+   # (type: Optional[float], default: 1.0)
+   max_norm: 1.0
+
+   # (type: float, default: 4e-05)
+   min_lr: 1e-6
+
+ # Evaluation-related arguments. See ``litgpt.args.EvalArgs`` for details
+ eval:
+   # Number of optimizer steps between evaluation calls (type: int, default: 1000)
+   interval: 50
+
+   # Number of tokens to generate (type: Optional[int], default: null)
+   max_new_tokens:
+
+   # Number of iterations (type: int, default: 100)
+   max_iters: 100
+
+   # Whether to evaluate on the validation set at the beginning of the training
+   initial_validation: true
+
+   # Whether to evaluate on the validation set at the end of the training
+   final_validation: true
+
+ # Optimizer-related arguments
+ # optimizer:
+ #   class_path: torch.optim.AdamW
+ #   init_args:
+ #     # (type: float, default: 0.001)
+ #     lr: 3e-4
+ #     # (type: float, default: 0.01)
+ #     weight_decay: 0.01
+ #     # (type: tuple, default: (0.9,0.999))
+ #     betas:
+ #       - 0.9
+ #       - 0.999
+
+ # optimizer:
+ #   class_path: sophia_opt.SophiaG
+ #   init_args:
+ #     lr: 3e-4
+ #     betas:
+ #       - 0.9
+ #       - 0.95
+ #     rho: 0.05
+ #     weight_decay: 0.1
+
+ optimizer:
+   class_path: sophia_opt.SophiaG
+   init_args:
+     lr: 1e-5
+     betas:
+       - 0.965
+       - 0.99
+     rho: 0.04
+     weight_decay: 1e-1
+
+ # How many devices/GPUs to use. Uses all GPUs by default. (type: Union[int, str], default: auto)
+ devices: auto
+
+ # How many nodes to use. (type: int, default: 1)
+ num_nodes: 1
+
+ # Optional path to the tokenizer dir that was used for preprocessing the dataset. Only some data
+ # modules require this. (type: Optional[Path], default: null)
+ tokenizer_dir: "../tokenizer"
+
+ # The name of the logger to send metrics to. (type: Literal['wandb', 'tensorboard', 'csv'], default: tensorboard)
+ logger_name: "wandb"
+
+ # The random seed to use for reproducibility. (type: int, default: 42)
+ seed: 23
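Reading the train block together: with `global_batch_size: 512` and `max_seq_length: 2049`, each optimizer step consumes about 1.05M tokens, so `max_tokens: 1830709785` works out to roughly 1,745 optimizer steps, with 128 micro-batches accumulated per step. The arithmetic below is only a sanity check; litgpt's exact accounting may round differently.

```python
# Back-of-the-envelope training length from pretrain_core_model_1.yaml.
max_tokens = 1_830_709_785
global_batch_size = 512
micro_batch_size = 4
max_seq_length = 2049

tokens_per_step = global_batch_size * max_seq_length   # 1,049,088 tokens per optimizer step
optimizer_steps = max_tokens // tokens_per_step        # ~1,745 optimizer steps
grad_accum = global_batch_size // micro_batch_size     # 128 micro-batches accumulated per step (single device)

print(optimizer_steps, grad_accum)
```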