[INFO|2025-02-22 11:20:55] configuration_utils.py:696 >> loading configuration file config.json from cache at C:\Users\Asad_\.cache\huggingface\hub\models--Qwen--Qwen2.5-0.5B-Instruct\snapshots\7ae557604adf67be50417f59c2c2f167def9a775\config.json
[INFO|2025-02-22 11:20:55] configuration_utils.py:768 >> Model config Qwen2Config {
  "_name_or_path": "Qwen/Qwen2.5-0.5B-Instruct",
  "architectures": [
    "Qwen2ForCausalLM"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 151643,
  "eos_token_id": 151645,
  "hidden_act": "silu",
  "hidden_size": 896,
  "initializer_range": 0.02,
  "intermediate_size": 4864,
  "max_position_embeddings": 32768,
  "max_window_layers": 21,
  "model_type": "qwen2",
  "num_attention_heads": 14,
  "num_hidden_layers": 24,
  "num_key_value_heads": 2,
  "rms_norm_eps": 1e-06,
  "rope_scaling": null,
  "rope_theta": 1000000.0,
  "sliding_window": null,
  "tie_word_embeddings": true,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.48.3",
  "use_cache": true,
  "use_sliding_window": false,
  "vocab_size": 151936
}
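For reference, the config dump above can be reproduced locally; a minimal sketch, assuming transformers is installed and the Hub model is reachable:

```python
from transformers import AutoConfig

# Reads the same config.json the log shows (from the HF cache if present).
config = AutoConfig.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")

# A few of the fields printed above.
print(config.model_type)           # "qwen2"
print(config.hidden_size)          # 896
print(config.num_hidden_layers)    # 24
print(config.num_key_value_heads)  # 2 (grouped-query attention)
```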
[INFO|2025-02-22 11:20:55] tokenization_utils_base.py:2034 >> loading file vocab.json from cache at C:\Users\Asad_\.cache\huggingface\hub\models--Qwen--Qwen2.5-0.5B-Instruct\snapshots\7ae557604adf67be50417f59c2c2f167def9a775\vocab.json

[INFO|2025-02-22 11:20:55] tokenization_utils_base.py:2034 >> loading file merges.txt from cache at C:\Users\Asad_\.cache\huggingface\hub\models--Qwen--Qwen2.5-0.5B-Instruct\snapshots\7ae557604adf67be50417f59c2c2f167def9a775\merges.txt

[INFO|2025-02-22 11:20:55] tokenization_utils_base.py:2034 >> loading file tokenizer.json from cache at C:\Users\Asad_\.cache\huggingface\hub\models--Qwen--Qwen2.5-0.5B-Instruct\snapshots\7ae557604adf67be50417f59c2c2f167def9a775\tokenizer.json

[INFO|2025-02-22 11:20:55] tokenization_utils_base.py:2034 >> loading file added_tokens.json from cache at None

[INFO|2025-02-22 11:20:55] tokenization_utils_base.py:2034 >> loading file special_tokens_map.json from cache at None

[INFO|2025-02-22 11:20:55] tokenization_utils_base.py:2034 >> loading file tokenizer_config.json from cache at C:\Users\Asad_\.cache\huggingface\hub\models--Qwen--Qwen2.5-0.5B-Instruct\snapshots\7ae557604adf67be50417f59c2c2f167def9a775\tokenizer_config.json

[INFO|2025-02-22 11:20:55] tokenization_utils_base.py:2034 >> loading file chat_template.jinja from cache at None

[INFO|2025-02-22 11:20:55] tokenization_utils_base.py:2304 >> Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
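The tokenizer files above load through the usual one-liner; a small sketch, assuming the same cached model:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")

# The special tokens the log warns about; <|im_end|> (id 151645) is also the
# eos_token_id in the config dump above.
print(tokenizer.eos_token)                            # "<|im_end|>"
print(tokenizer.convert_tokens_to_ids("<|im_end|>"))  # 151645
```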
[INFO|2025-02-22 11:20:56] configuration_utils.py:696 >> loading configuration file config.json from cache at C:\Users\Asad_\.cache\huggingface\hub\models--Qwen--Qwen2.5-0.5B-Instruct\snapshots\7ae557604adf67be50417f59c2c2f167def9a775\config.json

[INFO|2025-02-22 11:20:56] configuration_utils.py:768 >> Model config Qwen2Config { ...same body as the first Qwen2Config dump above... }
[INFO|2025-02-22 11:20:56] tokenization_utils_base.py:2034 >> loading file vocab.json from cache at C:\Users\Asad_\.cache\huggingface\hub\models--Qwen--Qwen2.5-0.5B-Instruct\snapshots\7ae557604adf67be50417f59c2c2f167def9a775\vocab.json

[INFO|2025-02-22 11:20:56] tokenization_utils_base.py:2034 >> loading file merges.txt from cache at C:\Users\Asad_\.cache\huggingface\hub\models--Qwen--Qwen2.5-0.5B-Instruct\snapshots\7ae557604adf67be50417f59c2c2f167def9a775\merges.txt

[INFO|2025-02-22 11:20:56] tokenization_utils_base.py:2034 >> loading file tokenizer.json from cache at C:\Users\Asad_\.cache\huggingface\hub\models--Qwen--Qwen2.5-0.5B-Instruct\snapshots\7ae557604adf67be50417f59c2c2f167def9a775\tokenizer.json

[INFO|2025-02-22 11:20:56] tokenization_utils_base.py:2034 >> loading file added_tokens.json from cache at None

[INFO|2025-02-22 11:20:56] tokenization_utils_base.py:2034 >> loading file special_tokens_map.json from cache at None

[INFO|2025-02-22 11:20:56] tokenization_utils_base.py:2034 >> loading file tokenizer_config.json from cache at C:\Users\Asad_\.cache\huggingface\hub\models--Qwen--Qwen2.5-0.5B-Instruct\snapshots\7ae557604adf67be50417f59c2c2f167def9a775\tokenizer_config.json

[INFO|2025-02-22 11:20:56] tokenization_utils_base.py:2034 >> loading file chat_template.jinja from cache at None

[INFO|2025-02-22 11:20:56] tokenization_utils_base.py:2304 >> Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
[INFO|2025-02-22 11:20:56] logging.py:157 >> Add <|im_end|> to stop words.

[INFO|2025-02-22 11:20:56] logging.py:157 >> Loading dataset prithivMLmods/Deepthink-Reasoning-Ins...

[INFO|2025-02-22 11:21:19] logging.py:157 >> Loading dataset open-thoughts/OpenThoughts-114k...
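Both corpora come from the Hugging Face Hub via the datasets library. The first repo id is truncated in the log line above, so the sketch below only pulls the second; the split name is an assumption about that dataset's layout:

```python
from datasets import load_dataset

# The second dataset named in the log. The first repo id is cut off in the
# log ("prithivMLmods/Deepthink-Reasoning-Ins...") and is omitted here.
open_thoughts = load_dataset("open-thoughts/OpenThoughts-114k", split="train")
print(open_thoughts)
```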
[INFO|2025-02-22 11:27:18] configuration_utils.py:696 >> loading configuration file config.json from cache at C:\Users\Asad_\.cache\huggingface\hub\models--Qwen--Qwen2.5-0.5B-Instruct\snapshots\7ae557604adf67be50417f59c2c2f167def9a775\config.json

[INFO|2025-02-22 11:27:18] configuration_utils.py:768 >> Model config Qwen2Config { ...same body as the first Qwen2Config dump above... }
[INFO|2025-02-22 11:27:18] modeling_utils.py:3904 >> loading weights file model.safetensors from cache at C:\Users\Asad_\.cache\huggingface\hub\models--Qwen--Qwen2.5-0.5B-Instruct\snapshots\7ae557604adf67be50417f59c2c2f167def9a775\model.safetensors

[INFO|2025-02-22 11:27:18] modeling_utils.py:1582 >> Instantiating Qwen2ForCausalLM model under default dtype torch.bfloat16.

[INFO|2025-02-22 11:27:18] configuration_utils.py:1140 >> Generate config GenerationConfig {
  "bos_token_id": 151643,
  "eos_token_id": 151645
}
[INFO|2025-02-22 11:27:21] modeling_utils.py:4888 >> All model checkpoint weights were used when initializing Qwen2ForCausalLM.

[INFO|2025-02-22 11:27:21] modeling_utils.py:4896 >> All the weights of Qwen2ForCausalLM were initialized from the model checkpoint at Qwen/Qwen2.5-0.5B-Instruct.
If your task is similar to the task the model of the checkpoint was trained on, you can already use Qwen2ForCausalLM for predictions without further training.
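Loading the weights in the dtype the log reports is a single call; a minimal sketch, assuming hardware with bfloat16 support:

```python
import torch
from transformers import AutoModelForCausalLM

# Mirrors "Instantiating Qwen2ForCausalLM model under default dtype torch.bfloat16."
model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2.5-0.5B-Instruct",
    torch_dtype=torch.bfloat16,
)
print(model.dtype)  # torch.bfloat16
```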
[INFO|2025-02-22 11:27:22] configuration_utils.py:1095 >> loading configuration file generation_config.json from cache at C:\Users\Asad_\.cache\huggingface\hub\models--Qwen--Qwen2.5-0.5B-Instruct\snapshots\7ae557604adf67be50417f59c2c2f167def9a775\generation_config.json

[INFO|2025-02-22 11:27:22] configuration_utils.py:1140 >> Generate config GenerationConfig {
  "bos_token_id": 151643,
  "do_sample": true,
  "eos_token_id": [
    151645,
    151643
  ],
  "pad_token_id": 151643,
  "repetition_penalty": 1.1,
  "temperature": 0.7,
  "top_k": 20,
  "top_p": 0.8
}
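The sampling values in this GenerationConfig (temperature 0.7, top-p 0.8, top-k 20, repetition penalty 1.1) are the defaults generate() will pick up for this model. A sketch of passing the same values explicitly, assuming the model and tokenizer from the earlier sketches:

```python
# Reuses `model` and `tokenizer` from the sketches above.
inputs = tokenizer("Hello", return_tensors="pt")
out = model.generate(
    **inputs,
    do_sample=True,
    temperature=0.7,
    top_p=0.8,
    top_k=20,
    repetition_penalty=1.1,
    max_new_tokens=32,
)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```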
[INFO|2025-02-22 11:27:22] logging.py:157 >> Gradient checkpointing enabled.

[INFO|2025-02-22 11:27:22] logging.py:157 >> Using torch SDPA for faster training and inference.

[INFO|2025-02-22 11:27:22] logging.py:157 >> Upcasting trainable params to float32.

[INFO|2025-02-22 11:27:22] logging.py:157 >> Fine-tuning method: Full

[INFO|2025-02-22 11:27:22] logging.py:157 >> trainable params: 494,032,768 || all params: 494,032,768 || trainable%: 100.0000

[INFO|2025-02-22 11:27:22] trainer.py:741 >> Using auto half precision backend
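The "trainable params" line reflects full fine-tuning: every parameter requires grad, so trainable equals total. A sketch of how such a line is typically computed:

```python
# With full fine-tuning every parameter requires grad,
# so trainable == all == 494,032,768.
trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
total = sum(p.numel() for p in model.parameters())
print(f"trainable params: {trainable:,} || all params: {total:,} "
      f"|| trainable%: {100 * trainable / total:.4f}")
```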
[INFO|2025-02-22 11:27:22] trainer.py:2775 >> Loading model from saves\Qwen2.5-0.5B-Instruct\full\20-02-2025\checkpoint-13000.

[WARNING|2025-02-22 11:27:23] trainer.py:3018 >> There were missing keys in the checkpoint model loaded: ['lm_head.weight'].
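This warning is expected rather than a problem: the config sets "tie_word_embeddings": true, so the output head shares its tensor with the input embedding, safetensors stores the shared tensor only once, and the head is re-tied on load. A sketch of checking the tie; the attribute path is the usual Qwen2ForCausalLM layout in transformers, but treat it as an assumption:

```python
# If the weights are tied, both attributes reference the same tensor,
# which is why the checkpoint can safely omit lm_head.weight.
# Attribute path assumed from the standard Qwen2ForCausalLM layout.
print(model.lm_head.weight is model.model.embed_tokens.weight)  # True when tied
```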
[INFO|2025-02-22 11:27:23] trainer.py:2369 >> ***** Running training *****

[INFO|2025-02-22 11:27:23] trainer.py:2370 >> Num examples = 102,787

[INFO|2025-02-22 11:27:23] trainer.py:2371 >> Num Epochs = 1

[INFO|2025-02-22 11:27:23] trainer.py:2372 >> Instantaneous batch size per device = 1

[INFO|2025-02-22 11:27:23] trainer.py:2375 >> Total train batch size (w. parallel, distributed & accumulation) = 8

[INFO|2025-02-22 11:27:23] trainer.py:2376 >> Gradient Accumulation steps = 8

[INFO|2025-02-22 11:27:23] trainer.py:2377 >> Total optimization steps = 12,848

[INFO|2025-02-22 11:27:23] trainer.py:2378 >> Number of trainable parameters = 494,032,768
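The step count follows from the numbers above: a per-device batch of 1 with 8 gradient-accumulation steps gives an effective batch of 8, and one epoch over 102,787 examples yields 102,787 // 8 = 12,848 optimization steps. The arithmetic, for reference:

```python
num_examples = 102_787
per_device_batch = 1
grad_accum = 8

effective_batch = per_device_batch * grad_accum    # 8
steps_per_epoch = num_examples // effective_batch  # 12_848
print(effective_batch, steps_per_epoch)
```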
[INFO|2025-02-22 11:27:23] trainer.py:2400 >> Continuing training from checkpoint, will skip to saved global_step

[INFO|2025-02-22 11:27:23] trainer.py:2401 >> Continuing training from epoch 1

[INFO|2025-02-22 11:27:23] trainer.py:2402 >> Continuing training from global step 13000

[INFO|2025-02-22 11:27:23] trainer.py:2404 >> Will skip the first 1 epochs then the first 1216 batches in the first epoch.

[INFO|2025-02-22 11:27:23] trainer.py:2643 >>
Training completed. Do not forget to share your model on huggingface.co/models =)
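This is why "Training completed" appears in the same second as the resume: the checkpoint's global step (13,000) already exceeds the 12,848 optimization steps this one-epoch run contains. The trainer skips 13,000 // 12,848 = 1 full epoch plus 13,000 % 12,848 = 152 further update steps (152 × 8 = 1,216 batches, matching the skip message above), and with only one epoch configured nothing remains to train. The arithmetic:

```python
global_step = 13_000
steps_per_epoch = 12_848
grad_accum = 8
num_train_epochs = 1

epochs_to_skip = global_step // steps_per_epoch                 # 1
batches_to_skip = (global_step % steps_per_epoch) * grad_accum  # 152 * 8 = 1216

# With num_train_epochs = 1 and one whole epoch already skipped,
# no batches remain, so the run finishes as soon as it starts.
print(epochs_to_skip, batches_to_skip)
```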
[INFO|2025-02-22 11:27:23] trainer.py:3910 >> Saving model checkpoint to saves\Qwen2.5-0.5B-Instruct\full\20-02-2025

[INFO|2025-02-22 11:27:23] configuration_utils.py:420 >> Configuration saved in saves\Qwen2.5-0.5B-Instruct\full\20-02-2025\config.json

[INFO|2025-02-22 11:27:23] configuration_utils.py:909 >> Configuration saved in saves\Qwen2.5-0.5B-Instruct\full\20-02-2025\generation_config.json

[INFO|2025-02-22 11:27:25] modeling_utils.py:2988 >> Model weights saved in saves\Qwen2.5-0.5B-Instruct\full\20-02-2025\model.safetensors

[INFO|2025-02-22 11:27:25] tokenization_utils_base.py:2491 >> tokenizer config file saved in saves\Qwen2.5-0.5B-Instruct\full\20-02-2025\tokenizer_config.json

[INFO|2025-02-22 11:27:25] tokenization_utils_base.py:2500 >> Special tokens file saved in saves\Qwen2.5-0.5B-Instruct\full\20-02-2025\special_tokens_map.json
[WARNING|2025-02-22 11:27:26] logging.py:162 >> No metric eval_accuracy to plot.

[INFO|2025-02-22 11:27:26] trainer.py:4226 >>
***** Running Evaluation *****

[INFO|2025-02-22 11:27:26] trainer.py:4228 >> Num examples = 11421

[INFO|2025-02-22 11:27:26] trainer.py:4231 >> Batch size = 1