File size: 1,063 Bytes
Commit: af14b46
{"activation_function": "gelu_new", "architectures": ["GPT2LMHeadModel"], "attn_pdrop": 0.1, "bos_token_id": 50256, "embd_pdrop": 0.1, "eos_token_id": 50256, "initializer_range": 0.02, "layer_norm_epsilon": 1e-05, "model_type": "gpt2", "n_ctx": 1024, "n_embd": 768, "n_head": 12, "n_inner": null, "n_layer": 12, "n_positions": 1024, "neuron": {"compiler_version": "2.15.141.0+d3cfc8ca", "input_specs": {"attention_mask": [4, 1024], "input_ids": [4, 1024], "labels": [4, 1024]}, "model_class": "GPT2LMHeadModel", "num_neuron_cores_per_node": 32, "pipeline_parallel_size": 1, "precision": "bfloat16", "tensor_parallel_size": 1, "training": true}, "reorder_and_upcast_attn": false, "resid_pdrop": 0.1, "scale_attn_by_inverse_layer_idx": false, "scale_attn_weights": true, "summary_activation": null, "summary_first_dropout": 0.1, "summary_proj_to_labels": true, "summary_type": "cls_index", "summary_use_proj": true, "task_specific_params": {"text-generation": {"do_sample": true, "max_length": 50}}, "torch_dtype": "float32", "use_cache": true, "vocab_size": 50257}