{
  "activation_function": "gelu_new",
  "architectures": ["GPT2LMHeadModel"],
  "attn_pdrop": 0.1,
  "bos_token_id": 50256,
  "embd_pdrop": 0.1,
  "eos_token_id": 50256,
  "initializer_range": 0.02,
  "layer_norm_epsilon": 1e-05,
  "model_type": "gpt2",
  "n_ctx": 2048,
  "n_embd": 2048,
  "n_head": 16,
  "n_layer": 24,
  "n_positions": 2048,
  "predict_special_tokens": true,
  "resid_pdrop": 0.1,
  "transformers_version": "4.34.0",
  "vocab_size": 50257,
  "task_specific_params": {
    "text-generation": {
      "do_sample": true,
      "max_length": 2048
    }
  },
  "language": ["en"],
  "tags": ["text-generation-inference"],
  "metrics": ["accuracy"],
  "pipeline_tag": "text-generation",
  "library_name": "transformers",
  "license": "apache-2.0",
  "custom_params": {
    "adaptation_rate": 0.05,
    "ecosystem_dynamics": {
      "environmental_volatility": 0.1,
      "resource_pool": 1
    },
    "hidden_dim": 2048,
    "initial_neuron_count": 5000,
    "innovative_growth_net": {
      "adaptation_rate": 0.05,
      "initial_capacity": 250000,
      "input_size": 2048
    },
    "max_complexity": 50000,
    "max_neurons": 250000,
    "max_sequence_length": 2048,
    "min_epochs_before_growth": 5,
    "model_filename": "pytorch_model.bin",
    "num_embeddings": 100000,
    "pruning_improvement_threshold": 0.005,
    "some_adaptation_rate": 0.05,
    "stability_threshold": 0.02,
    "start_token_index": 2
  },
  "max_input_length": 2048,
  "max_total_tokens": 2051,
  "max_concurrent_requests": 128,
  "max_best_of": 2,
  "max_stop_sequences": 4,
  "max_top_n_tokens": 5,
  "waiting_served_ratio": 1.2,
  "max_batch_prefill_tokens": 2048,
  "max_waiting_tokens": 20,
  "sharded": false
}