{
"builder_config": {
"apply_query_key_layer_scaling": false,
"cross_attention": true,
"has_position_embedding": true,
"has_token_type_embedding": false,
"hidden_act": "gelu",
"hidden_size": 512,
"int8": true,
"max_batch_size": 8,
"max_beam_width": 5,
"max_input_len": 224,
"max_output_len": 512,
"max_position_embeddings": 448,
"name": "whisper_decoder",
"num_heads": 8,
"num_layers": 6,
"precision": "float16",
"strongly_typed": false,
"tensor_parallel": 1,
"use_refit": false,
"vocab_size": 51865
},
"plugin_config": {
"attention_qk_half_accumulation": false,
"bert_attention_plugin": null,
"context_fmha": true,
"context_fmha_fp32_acc": false,
"enable_xqa": false,
"gemm_plugin": "float16",
"gpt_attention_plugin": "float16",
"identity_plugin": null,
"layernorm_quantization_plugin": null,
"lookup_plugin": null,
"lora_plugin": null,
"mamba_conv1d_plugin": null,
"moe_plugin": null,
"multi_block_mode": false,
"multiple_profiles": false,
"nccl_plugin": null,
"paged_kv_cache": false,
"paged_state": false,
"quantize_per_token_plugin": false,
"quantize_tensor_plugin": false,
"remove_input_padding": false,
"rmsnorm_quantization_plugin": null,
"smooth_quant_gemm_plugin": null,
"streamingllm": false,
"tokens_per_block": 128,
"use_context_fmha_for_generation": false,
"use_custom_all_reduce": false,
"use_fp8_context_fmha": false,
"use_paged_context_fmha": false,
"weight_only_groupwise_quant_matmul_plugin": null,
"weight_only_quant_matmul_plugin": "float16"
}
}
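
For reference, a minimal sketch of reading this config back and summarizing the engine it describes: a 6-layer, 8-head Whisper decoder built at float16 precision with int8 weight-only quantization, cross-attention enabled, and a 224-token input / 512-token output budget. This uses only the Python standard library, so every call below is exact; the `CONFIG_PATH` is a placeholder for wherever the engine directory actually lives.

```python
import json

# Placeholder path -- point this at the directory holding the built engine's config.
CONFIG_PATH = "whisper_decoder/config.json"

with open(CONFIG_PATH) as f:
    config = json.load(f)

builder = config["builder_config"]
plugins = config["plugin_config"]

# Derived geometry: 512 hidden units split across 8 heads -> 64 dims per head.
head_dim = builder["hidden_size"] // builder["num_heads"]

print(f"engine:           {builder['name']}")
print(f"layers x heads:   {builder['num_layers']} x {builder['num_heads']} (head_dim={head_dim})")
print(f"precision:        {builder['precision']} (int8 weight-only: {builder['int8']})")
print(f"cross-attention:  {builder['cross_attention']}")
print(f"max in/out len:   {builder['max_input_len']} / {builder['max_output_len']} tokens")
print(f"batch x beams:    {builder['max_batch_size']} x {builder['max_beam_width']}")
print(f"gpt_attention:    {plugins['gpt_attention_plugin']}")
print(f"weight-only gemm: {plugins['weight_only_quant_matmul_plugin']}")
print(f"paged KV cache:   {plugins['paged_kv_cache']}")
```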