Delete Luminia-8B-RP-DPO/running_log.txt
Luminia-8B-RP-DPO/running_log.txt
DELETED
@@ -1,257 +0,0 @@
-[WARNING|parser.py:279] 2024-08-31 19:14:40,267 >> We recommend enable `upcast_layernorm` in quantized training.
-
-[INFO|parser.py:351] 2024-08-31 19:14:40,267 >> Process rank: 0, device: cuda:0, n_gpu: 1, distributed training: False, compute dtype: torch.bfloat16
-
-[INFO|tokenization_utils_base.py:2289] 2024-08-31 19:14:40,450 >> loading file tokenizer.json from cache at G:\Dataset\cache\hub\models--NousResearch--Meta-Llama-3.1-8B-Instruct\snapshots\d10aef7999a2b5ba950ab3974312feeedbfe0b77\tokenizer.json
-
-[INFO|tokenization_utils_base.py:2289] 2024-08-31 19:14:40,450 >> loading file added_tokens.json from cache at None
-
-[INFO|tokenization_utils_base.py:2289] 2024-08-31 19:14:40,450 >> loading file special_tokens_map.json from cache at G:\Dataset\cache\hub\models--NousResearch--Meta-Llama-3.1-8B-Instruct\snapshots\d10aef7999a2b5ba950ab3974312feeedbfe0b77\special_tokens_map.json
-
-[INFO|tokenization_utils_base.py:2289] 2024-08-31 19:14:40,451 >> loading file tokenizer_config.json from cache at G:\Dataset\cache\hub\models--NousResearch--Meta-Llama-3.1-8B-Instruct\snapshots\d10aef7999a2b5ba950ab3974312feeedbfe0b77\tokenizer_config.json
-
-[INFO|tokenization_utils_base.py:2533] 2024-08-31 19:14:40,616 >> Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
-
-[INFO|template.py:373] 2024-08-31 19:14:40,616 >> Add pad token: <|eot_id|>
-
-[INFO|loader.py:52] 2024-08-31 19:14:40,617 >> Loading dataset qa-unc-dpo.json...
-
-[INFO|configuration_utils.py:733] 2024-08-31 19:14:59,967 >> loading configuration file config.json from cache at G:\Dataset\cache\hub\models--NousResearch--Meta-Llama-3.1-8B-Instruct\snapshots\d10aef7999a2b5ba950ab3974312feeedbfe0b77\config.json
-
-[INFO|configuration_utils.py:800] 2024-08-31 19:14:59,968 >> Model config LlamaConfig {
-  "_name_or_path": "NousResearch/Meta-Llama-3.1-8B-Instruct",
-  "architectures": [
-    "LlamaForCausalLM"
-  ],
-  "attention_bias": false,
-  "attention_dropout": 0.0,
-  "bos_token_id": 128000,
-  "eos_token_id": [
-    128001,
-    128008,
-    128009
-  ],
-  "hidden_act": "silu",
-  "hidden_size": 4096,
-  "initializer_range": 0.02,
-  "intermediate_size": 14336,
-  "max_position_embeddings": 131072,
-  "mlp_bias": false,
-  "model_type": "llama",
-  "num_attention_heads": 32,
-  "num_hidden_layers": 32,
-  "num_key_value_heads": 8,
-  "pretraining_tp": 1,
-  "rms_norm_eps": 1e-05,
-  "rope_scaling": {
-    "factor": 8.0,
-    "high_freq_factor": 4.0,
-    "low_freq_factor": 1.0,
-    "original_max_position_embeddings": 8192,
-    "rope_type": "llama3"
-  },
-  "rope_theta": 500000.0,
-  "tie_word_embeddings": false,
-  "torch_dtype": "bfloat16",
-  "transformers_version": "4.43.3",
-  "use_cache": true,
-  "vocab_size": 128256
-}
-
-
-[WARNING|rope.py:57] 2024-08-31 19:14:59,970 >> Input length is smaller than max length. Consider increase input length.
-
-[INFO|rope.py:63] 2024-08-31 19:14:59,970 >> Using linear scaling strategy and setting scaling factor to 1.0
-
-[INFO|quantization.py:182] 2024-08-31 19:14:59,971 >> Quantizing model to 4 bit with bitsandbytes.
-
-[INFO|modeling_utils.py:3634] 2024-08-31 19:15:00,414 >> loading weights file model.safetensors from cache at G:\Dataset\cache\hub\models--NousResearch--Meta-Llama-3.1-8B-Instruct\snapshots\d10aef7999a2b5ba950ab3974312feeedbfe0b77\model.safetensors.index.json
-
-[INFO|modeling_utils.py:1572] 2024-08-31 19:15:00,426 >> Instantiating LlamaForCausalLM model under default dtype torch.bfloat16.
-
-[INFO|configuration_utils.py:1038] 2024-08-31 19:15:00,429 >> Generate config GenerationConfig {
-  "bos_token_id": 128000,
-  "eos_token_id": [
-    128001,
-    128008,
-    128009
-  ]
-}
-
-
-[INFO|modeling_utils.py:4463] 2024-08-31 19:15:57,314 >> All model checkpoint weights were used when initializing LlamaForCausalLM.
-
-
-[INFO|modeling_utils.py:4471] 2024-08-31 19:15:57,314 >> All the weights of LlamaForCausalLM were initialized from the model checkpoint at NousResearch/Meta-Llama-3.1-8B-Instruct.
-If your task is similar to the task the model of the checkpoint was trained on, you can already use LlamaForCausalLM for predictions without further training.
-
-[INFO|configuration_utils.py:993] 2024-08-31 19:15:57,500 >> loading configuration file generation_config.json from cache at G:\Dataset\cache\hub\models--NousResearch--Meta-Llama-3.1-8B-Instruct\snapshots\d10aef7999a2b5ba950ab3974312feeedbfe0b77\generation_config.json
-
-[INFO|configuration_utils.py:1038] 2024-08-31 19:15:57,502 >> Generate config GenerationConfig {
-  "bos_token_id": 128000,
-  "do_sample": true,
-  "eos_token_id": [
-    128001,
-    128008,
-    128009
-  ],
-  "temperature": 0.6,
-  "top_p": 0.9
-}
-
-
-[WARNING|quantizer_bnb_4bit.py:305] 2024-08-31 19:15:57,597 >> You are calling `save_pretrained` to a 4-bit converted model, but your `bitsandbytes` version doesn't support it. If you want to save 4-bit models, make sure to have `bitsandbytes>=0.41.3` installed.
-
-[INFO|checkpointing.py:103] 2024-08-31 19:15:57,598 >> Gradient checkpointing enabled.
-
-[INFO|attention.py:82] 2024-08-31 19:15:57,598 >> Using FlashAttention-2 for faster training and inference.
-
-[INFO|adapter.py:302] 2024-08-31 19:15:57,598 >> Upcasting trainable params to float32.
-
-[INFO|adapter.py:158] 2024-08-31 19:15:57,598 >> Fine-tuning method: LoRA
-
-[INFO|adapter.py:203] 2024-08-31 19:15:58,444 >> Loaded adapter(s): saves\LLaMA3.1-8B-Chat\lora\Luminia-8B-RP
-
-[INFO|loader.py:196] 2024-08-31 19:15:58,454 >> trainable params: 83,886,080 || all params: 8,114,147,328 || trainable%: 1.0338
-
-[INFO|trainer.py:648] 2024-08-31 19:15:58,522 >> Using auto half precision backend
-
-[INFO|trainer.py:2134] 2024-08-31 19:15:58,646 >> ***** Running training *****
-
-[INFO|trainer.py:2135] 2024-08-31 19:15:58,646 >> Num examples = 83
-
-[INFO|trainer.py:2136] 2024-08-31 19:15:58,646 >> Num Epochs = 1
-
-[INFO|trainer.py:2137] 2024-08-31 19:15:58,647 >> Instantaneous batch size per device = 1
-
-[INFO|trainer.py:2140] 2024-08-31 19:15:58,647 >> Total train batch size (w. parallel, distributed & accumulation) = 1
-
-[INFO|trainer.py:2141] 2024-08-31 19:15:58,647 >> Gradient Accumulation steps = 1
-
-[INFO|trainer.py:2142] 2024-08-31 19:15:58,647 >> Total optimization steps = 83
-
-[INFO|trainer.py:2143] 2024-08-31 19:15:58,649 >> Number of trainable parameters = 83,886,080
-
-[INFO|callbacks.py:320] 2024-08-31 19:16:06,578 >> {'loss': 1.4781, 'learning_rate': 4.8230e-05, 'epoch': 0.12, 'throughput': 1118.38}
-
-[INFO|callbacks.py:320] 2024-08-31 19:16:13,176 >> {'loss': 1.4732, 'learning_rate': 4.3172e-05, 'epoch': 0.24, 'throughput': 1219.49}
-
-[INFO|callbacks.py:320] 2024-08-31 19:16:19,792 >> {'loss': 1.3140, 'learning_rate': 3.5542e-05, 'epoch': 0.36, 'throughput': 1301.78}
-
-[INFO|callbacks.py:320] 2024-08-31 19:16:25,955 >> {'loss': 1.2266, 'learning_rate': 2.6419e-05, 'epoch': 0.48, 'throughput': 1322.02}
-
-[INFO|callbacks.py:320] 2024-08-31 19:16:31,576 >> {'loss': 1.0201, 'learning_rate': 1.7095e-05, 'epoch': 0.60, 'throughput': 1319.87}
-
-[INFO|callbacks.py:320] 2024-08-31 19:16:37,779 >> {'loss': 1.4448, 'learning_rate': 8.8901e-06, 'epoch': 0.72, 'throughput': 1318.76}
-
-[INFO|callbacks.py:320] 2024-08-31 19:16:45,117 >> {'loss': 1.1615, 'learning_rate': 2.9659e-06, 'epoch': 0.84, 'throughput': 1347.76}
-
-[INFO|callbacks.py:320] 2024-08-31 19:16:52,319 >> {'loss': 1.2626, 'learning_rate': 1.6100e-07, 'epoch': 0.96, 'throughput': 1363.67}
-
-[INFO|trainer.py:3503] 2024-08-31 19:16:54,139 >> Saving model checkpoint to saves\LLaMA3.1-8B-Chat\lora\Luminia-8B-RP-DPO\checkpoint-83
-
-[INFO|configuration_utils.py:733] 2024-08-31 19:16:54,398 >> loading configuration file config.json from cache at G:\Dataset\cache\hub\models--NousResearch--Meta-Llama-3.1-8B-Instruct\snapshots\d10aef7999a2b5ba950ab3974312feeedbfe0b77\config.json
-
-[INFO|configuration_utils.py:800] 2024-08-31 19:16:54,401 >> Model config LlamaConfig {
-  "architectures": [
-    "LlamaForCausalLM"
-  ],
-  "attention_bias": false,
-  "attention_dropout": 0.0,
-  "bos_token_id": 128000,
-  "eos_token_id": [
-    128001,
-    128008,
-    128009
-  ],
-  "hidden_act": "silu",
-  "hidden_size": 4096,
-  "initializer_range": 0.02,
-  "intermediate_size": 14336,
-  "max_position_embeddings": 131072,
-  "mlp_bias": false,
-  "model_type": "llama",
-  "num_attention_heads": 32,
-  "num_hidden_layers": 32,
-  "num_key_value_heads": 8,
-  "pretraining_tp": 1,
-  "rms_norm_eps": 1e-05,
-  "rope_scaling": {
-    "factor": 8.0,
-    "high_freq_factor": 4.0,
-    "low_freq_factor": 1.0,
-    "original_max_position_embeddings": 8192,
-    "rope_type": "llama3"
-  },
-  "rope_theta": 500000.0,
-  "tie_word_embeddings": false,
-  "torch_dtype": "bfloat16",
-  "transformers_version": "4.43.3",
-  "use_cache": true,
-  "vocab_size": 128256
-}
-
-
-[INFO|tokenization_utils_base.py:2702] 2024-08-31 19:16:54,664 >> tokenizer config file saved in saves\LLaMA3.1-8B-Chat\lora\Luminia-8B-RP-DPO\checkpoint-83\tokenizer_config.json
-
-[INFO|tokenization_utils_base.py:2711] 2024-08-31 19:16:54,664 >> Special tokens file saved in saves\LLaMA3.1-8B-Chat\lora\Luminia-8B-RP-DPO\checkpoint-83\special_tokens_map.json
-
-[INFO|trainer.py:2394] 2024-08-31 19:16:54,983 >>
-
-Training completed. Do not forget to share your model on huggingface.co/models =)
-
-
-
-[INFO|trainer.py:3503] 2024-08-31 19:16:54,987 >> Saving model checkpoint to saves\LLaMA3.1-8B-Chat\lora\Luminia-8B-RP-DPO
-
-[INFO|configuration_utils.py:733] 2024-08-31 19:16:55,209 >> loading configuration file config.json from cache at G:\Dataset\cache\hub\models--NousResearch--Meta-Llama-3.1-8B-Instruct\snapshots\d10aef7999a2b5ba950ab3974312feeedbfe0b77\config.json
-
-[INFO|configuration_utils.py:800] 2024-08-31 19:16:55,210 >> Model config LlamaConfig {
-  "architectures": [
-    "LlamaForCausalLM"
-  ],
-  "attention_bias": false,
-  "attention_dropout": 0.0,
-  "bos_token_id": 128000,
-  "eos_token_id": [
-    128001,
-    128008,
-    128009
-  ],
-  "hidden_act": "silu",
-  "hidden_size": 4096,
-  "initializer_range": 0.02,
-  "intermediate_size": 14336,
-  "max_position_embeddings": 131072,
-  "mlp_bias": false,
-  "model_type": "llama",
-  "num_attention_heads": 32,
-  "num_hidden_layers": 32,
-  "num_key_value_heads": 8,
-  "pretraining_tp": 1,
-  "rms_norm_eps": 1e-05,
-  "rope_scaling": {
-    "factor": 8.0,
-    "high_freq_factor": 4.0,
-    "low_freq_factor": 1.0,
-    "original_max_position_embeddings": 8192,
-    "rope_type": "llama3"
-  },
-  "rope_theta": 500000.0,
-  "tie_word_embeddings": false,
-  "torch_dtype": "bfloat16",
-  "transformers_version": "4.43.3",
-  "use_cache": true,
-  "vocab_size": 128256
-}
-
-
-[INFO|tokenization_utils_base.py:2702] 2024-08-31 19:17:02,183 >> tokenizer config file saved in saves\LLaMA3.1-8B-Chat\lora\Luminia-8B-RP-DPO\tokenizer_config.json
-
-[INFO|tokenization_utils_base.py:2711] 2024-08-31 19:17:02,184 >> Special tokens file saved in saves\LLaMA3.1-8B-Chat\lora\Luminia-8B-RP-DPO\special_tokens_map.json
-
-[WARNING|ploting.py:89] 2024-08-31 19:17:02,350 >> No metric eval_loss to plot.
-
-[INFO|modelcard.py:449] 2024-08-31 19:17:02,400 >> Dropping the following result as it does not have all the necessary fields:
-{'task': {'name': 'Causal Language Modeling', 'type': 'text-generation'}}
-