{
  "_name_or_path": "/network/abhinav/models/gemma-2-2b-it",
  "architectures": [
    "Gemma2ForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "attn_logit_softcapping": 50.0,
  "bos_token_id": 2,
  "cache_implementation": "hybrid",
  "eos_token_id": [
    1,
    107
  ],
  "final_logit_softcapping": 30.0,
  "head_dim": 256,
  "hidden_act": "gelu_pytorch_tanh",
  "hidden_activation": "gelu_pytorch_tanh",
  "hidden_size": 2304,
  "initializer_range": 0.02,
  "intermediate_size": 9216,
  "max_position_embeddings": 8192,
  "model_type": "gemma2",
  "num_attention_heads": 8,
  "num_hidden_layers": 26,
  "num_key_value_heads": 4,
  "pad_token_id": 0,
  "query_pre_attn_scalar": 256,
  "rms_norm_eps": 1e-06,
  "rope_theta": 10000.0,
  "sliding_window": 4096,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.43.3",
  "use_cache": true,
  "vocab_size": 256000,
  "quantization_config": {
    "config_groups": {
      "group_0": {
        "input_activations": null,
        "output_activations": null,
        "targets": [
          "Linear"
        ],
        "weights": {
          "block_structure": null,
          "dynamic": false,
          "group_size": 128,
          "num_bits": 4,
          "observer": "minmax",
          "observer_kwargs": {},
          "strategy": "group",
          "symmetric": true,
          "type": "int"
        }
      }
    },
    "format": "pack-quantized",
    "global_compression_ratio": 1.6663703555585414,
    "ignore": [
      "lm_head"
    ],
    "kv_cache_scheme": null,
    "quant_method": "compressed-tensors",
    "quantization_status": "frozen",
    "sparsity_config": {
      "format": "dense",
      "global_sparsity": 0.12283362266962997,
      "registry_requires_subclass": false,
      "sparsity_structure": "unstructured"
    }
  }
}
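The block above is the `config.json` of a Gemma-2-2B-it checkpoint whose `Linear` weights were quantized to symmetric 4-bit integers in groups of 128 (weight-only W4A16, with `lm_head` excluded) and saved in the compressed-tensors `pack-quantized` format. Below is a minimal loading sketch, assuming a `transformers` release with compressed-tensors support and the `compressed-tensors` package installed; `model_path` is a hypothetical placeholder for wherever this quantized checkpoint is stored, not a path taken from the config.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_path = "path/to/this-quantized-checkpoint"  # hypothetical placeholder

tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    device_map="auto",   # place weights on available devices
    torch_dtype="auto",  # keep the bfloat16 dtype recorded in the config
)

# Gemma-2 instruction-tuned checkpoints expect the chat template;
# <end_of_turn> (token id 107) is one of the eos_token_id values above.
messages = [{"role": "user", "content": "Summarize W4A16 quantization in one sentence."}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
output = model.generate(input_ids, max_new_tokens=64)
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))
```

Since `quantization_status` is `"frozen"`, the quantization parameters are fixed (calibration is finished): the packed int4 weights are dequantized on the fly at inference time while activations remain unquantized.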