[INFO|2025-05-12 13:05:10] configuration_utils.py:696 >> loading configuration file config.json from cache at /home/kiho/.cache/huggingface/hub/models--infly--OpenCoder-8B-Instruct/snapshots/01badbbf10c2dfd7e2a0b5f570065ef44548576c/config.json
[INFO|2025-05-12 13:05:10] configuration_utils.py:768 >> Model config LlamaConfig {
  "_name_or_path": "infly/OpenCoder-8B-Instruct",
  "architectures": [
    "LlamaForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "bos_token_id": 96540,
  "eos_token_id": 96539,
  "head_dim": 128,
  "hidden_act": "silu",
  "hidden_size": 4096,
  "initializer_range": 0.02,
  "intermediate_size": 14336,
  "max_position_embeddings": 8192,
  "mlp_bias": false,
  "model_type": "llama",
  "num_attention_heads": 32,
  "num_hidden_layers": 32,
  "num_key_value_heads": 8,
  "pretraining_tp": 1,
  "rms_norm_eps": 1e-05,
  "rope_scaling": null,
  "rope_theta": 500000.0,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.48.2",
  "use_cache": true,
  "vocab_size": 96640
}
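The config above pins down the architecture: a 32-layer Llama-style decoder with grouped-query attention (32 query heads sharing 8 key/value heads) and a 96,640-token vocabulary. A minimal sketch of reproducing this load with the public transformers API (the model ID is taken from the log; everything else is standard library usage):

```python
from transformers import AutoConfig

# Fetch the same config.json the log shows being read from the Hub cache.
config = AutoConfig.from_pretrained("infly/OpenCoder-8B-Instruct")

# Grouped-query attention: 32 query heads share 8 key/value heads,
# i.e. each KV head serves 4 query heads.
print(config.num_attention_heads, config.num_key_value_heads)  # 32 8

# head_dim agrees with hidden_size / num_attention_heads: 4096 / 32 = 128.
print(config.hidden_size // config.num_attention_heads)  # 128
```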
[INFO|2025-05-12 13:05:13] tokenization_utils_base.py:2034 >> loading file ./tokenizer.model from cache at /home/kiho/.cache/huggingface/hub/models--infly--OpenCoder-8B-Instruct/snapshots/01badbbf10c2dfd7e2a0b5f570065ef44548576c/./tokenizer.model
[INFO|2025-05-12 13:05:13] tokenization_utils_base.py:2034 >> loading file added_tokens.json from cache at /home/kiho/.cache/huggingface/hub/models--infly--OpenCoder-8B-Instruct/snapshots/01badbbf10c2dfd7e2a0b5f570065ef44548576c/added_tokens.json
[INFO|2025-05-12 13:05:13] tokenization_utils_base.py:2034 >> loading file special_tokens_map.json from cache at /home/kiho/.cache/huggingface/hub/models--infly--OpenCoder-8B-Instruct/snapshots/01badbbf10c2dfd7e2a0b5f570065ef44548576c/special_tokens_map.json
[INFO|2025-05-12 13:05:13] tokenization_utils_base.py:2034 >> loading file tokenizer_config.json from cache at /home/kiho/.cache/huggingface/hub/models--infly--OpenCoder-8B-Instruct/snapshots/01badbbf10c2dfd7e2a0b5f570065ef44548576c/tokenizer_config.json
[INFO|2025-05-12 13:05:13] tokenization_utils_base.py:2034 >> loading file tokenizer.json from cache at None
[INFO|2025-05-12 13:05:13] tokenization_utils_base.py:2034 >> loading file chat_template.jinja from cache at None
[INFO|2025-05-12 13:05:13] tokenization_utils_base.py:2304 >> Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
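The last line is transformers' standard warning that tokens were appended to the tokenizer beyond the base vocabulary. A quick sanity check one might run (standard API calls; the expected values are read off the config dump above, so treat them as assumptions about this checkpoint):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("infly/OpenCoder-8B-Instruct")

# The config declares vocab_size 96640; the tokenizer, including added
# special tokens, must not exceed it, or the embedding matrix would need
# resizing before fine-tuning.
assert len(tokenizer) <= 96640

# bos/eos ids as declared in the config dump above.
print(tokenizer.bos_token_id, tokenizer.eos_token_id)  # expected: 96540 96539
```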
[INFO|2025-05-12 13:05:14] configuration_utils.py:696 >> loading configuration file config.json from cache at /home/kiho/.cache/huggingface/hub/models--infly--OpenCoder-8B-Instruct/snapshots/01badbbf10c2dfd7e2a0b5f570065ef44548576c/config.json
[INFO|2025-05-12 13:05:14] configuration_utils.py:768 >> Model config LlamaConfig {
  "_name_or_path": "infly/OpenCoder-8B-Instruct",
  "architectures": [
    "LlamaForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "bos_token_id": 96540,
  "eos_token_id": 96539,
  "head_dim": 128,
  "hidden_act": "silu",
  "hidden_size": 4096,
  "initializer_range": 0.02,
  "intermediate_size": 14336,
  "max_position_embeddings": 8192,
  "mlp_bias": false,
  "model_type": "llama",
  "num_attention_heads": 32,
  "num_hidden_layers": 32,
  "num_key_value_heads": 8,
  "pretraining_tp": 1,
  "rms_norm_eps": 1e-05,
  "rope_scaling": null,
  "rope_theta": 500000.0,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.48.2",
  "use_cache": true,
  "vocab_size": 96640
}
[INFO|2025-05-12 13:05:15] tokenization_utils_base.py:2034 >> loading file ./tokenizer.model from cache at /home/kiho/.cache/huggingface/hub/models--infly--OpenCoder-8B-Instruct/snapshots/01badbbf10c2dfd7e2a0b5f570065ef44548576c/./tokenizer.model
[INFO|2025-05-12 13:05:15] tokenization_utils_base.py:2034 >> loading file added_tokens.json from cache at /home/kiho/.cache/huggingface/hub/models--infly--OpenCoder-8B-Instruct/snapshots/01badbbf10c2dfd7e2a0b5f570065ef44548576c/added_tokens.json
[INFO|2025-05-12 13:05:15] tokenization_utils_base.py:2034 >> loading file special_tokens_map.json from cache at /home/kiho/.cache/huggingface/hub/models--infly--OpenCoder-8B-Instruct/snapshots/01badbbf10c2dfd7e2a0b5f570065ef44548576c/special_tokens_map.json
[INFO|2025-05-12 13:05:15] tokenization_utils_base.py:2034 >> loading file tokenizer_config.json from cache at /home/kiho/.cache/huggingface/hub/models--infly--OpenCoder-8B-Instruct/snapshots/01badbbf10c2dfd7e2a0b5f570065ef44548576c/tokenizer_config.json
[INFO|2025-05-12 13:05:15] tokenization_utils_base.py:2034 >> loading file tokenizer.json from cache at None
[INFO|2025-05-12 13:05:15] tokenization_utils_base.py:2034 >> loading file chat_template.jinja from cache at None
[INFO|2025-05-12 13:05:15] tokenization_utils_base.py:2304 >> Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
[INFO|2025-05-12 13:05:15] logging.py:157 >> Add <|im_end|> to stop words.
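`<|im_end|>` closes a turn in the ChatML-style template, so the trainer registers it as an extra stop word. A hedged sketch of the equivalent behavior at inference time (standard transformers calls; the prompt is a placeholder, and loading the 8B model this way assumes accelerate is installed for device_map):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("infly/OpenCoder-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained(
    "infly/OpenCoder-8B-Instruct", torch_dtype=torch.bfloat16, device_map="auto"
)

# <|im_end|> ends an assistant turn; passing it as an extra eos makes
# decoding stop at the end of the turn, mirroring the stop word above.
im_end_id = tokenizer.convert_tokens_to_ids("<|im_end|>")

input_ids = tokenizer.apply_chat_template(
    [{"role": "user", "content": "Write hello world in Python."}],
    add_generation_prompt=True,
    return_tensors="pt",
).to(model.device)

out = model.generate(
    input_ids,
    max_new_tokens=128,
    eos_token_id=[tokenizer.eos_token_id, im_end_id],
)
print(tokenizer.decode(out[0][input_ids.shape[-1]:], skip_special_tokens=True))
```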
[INFO|2025-05-12 13:05:15] logging.py:157 >> Loading dataset Codes3_query_filtered_330k_nlx.json...
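The log does not show the schema of Codes3_query_filtered_330k_nlx.json. As a loud assumption, LLaMA-Factory SFT datasets commonly use alpaca-style instruction/input/output records; a hypothetical inspection sketch under that assumption:

```python
import json

# Hypothetical: inspect the dataset file named in the log. The
# instruction/input/output layout is LLaMA-Factory's common alpaca-style
# convention, assumed here; this file's actual schema is not in the log.
with open("Codes3_query_filtered_330k_nlx.json") as f:
    records = json.load(f)

print(len(records))       # number of training examples
print(records[0].keys())  # e.g. dict_keys(['instruction', 'input', 'output'])
```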