{ "model_type": "gpt_bigcode", "architectures": [ "GPTBigCodeForCausalLM" ], "pre_weights": [ { "name": "transformer.wte.weight", "is_embed": true }, { "name": "transformer.wpe.weight" } ], "post_weights": [ { "name": "transformer.ln_f.weight" }, { "name": "transformer.ln_f.bias" }, { "name": "lm_head.weight", "aliases": [ "transformer.wte.weight" ] } ], "num_layers_config_key": "n_layer", "layer_templates": { "weights": [ { "name": "transformer.h.${layer_index}.attn.c_attn.weight" }, { "name": "transformer.h.${layer_index}.attn.c_attn.bias" }, { "name": "transformer.h.${layer_index}.attn.c_proj.weight" }, { "name": "transformer.h.${layer_index}.attn.c_proj.bias" }, { "name": "transformer.h.${layer_index}.ln_1.weight" }, { "name": "transformer.h.${layer_index}.ln_1.bias" }, { "name": "transformer.h.${layer_index}.ln_2.weight" }, { "name": "transformer.h.${layer_index}.ln_2.bias" }, { "name": "transformer.h.${layer_index}.mlp.c_proj.weight" }, { "name": "transformer.h.${layer_index}.mlp.c_proj.bias" }, { "name": "transformer.h.${layer_index}.mlp.c_fc.weight" }, { "name": "transformer.h.${layer_index}.mlp.c_fc.bias" } ] } }