{
  "_name_or_path": "wanderer2k1/base_LLM_183MB_concat_and_chunk",
"abili": false, | |
"architectures": [ | |
"MptForCausalLM" | |
], | |
"attn_config": { | |
"model_type": "" | |
}, | |
"d_model": 768, | |
"emb_pdrop": 0.0, | |
"embedding_fraction": 1.0, | |
"expansion_ratio": 4, | |
"init_device": "cpu", | |
"initializer_range": 0.02, | |
"layer_norm_epsilon": 1e-05, | |
"learned_pos_emb": true, | |
"logit_scale": null, | |
"max_seq_len": 8192, | |
"model_type": "mpt", | |
"n_heads": 12, | |
"n_layers": 12, | |
"no_bias": true, | |
"norm_type": "low_precision_layernorm", | |
"pad_token_id": 128009, | |
"resid_pdrop": 0.0, | |
"rope": true, | |
"rope_theta": 100000, | |
"torch_dtype": "float32", | |
"transformers_version": "4.40.0", | |
"use_cache": false, | |
"verbose": 0, | |
"vocab_size": 128256 | |
} | |
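For reference, a minimal sketch of loading this config and checkpoint with the transformers library. The repository id is taken from the "_name_or_path" field above; trust_remote_code=True is an assumption, in case the repo ships custom MPT modeling code for the "rope"/"rope_theta" fields, which the stock MptConfig does not define.

from transformers import AutoConfig, AutoModelForCausalLM

repo_id = "wanderer2k1/base_LLM_183MB_concat_and_chunk"

# Load the config shown above; extra keys (e.g. rope, rope_theta) are kept
# as attributes on the config object even if MptConfig does not declare them.
config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)  # assumption: remote code may be required
print(config.d_model, config.n_heads, config.n_layers)  # 768 12 12

# Load the model weights with the same settings.
model = AutoModelForCausalLM.from_pretrained(repo_id, trust_remote_code=True)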