yzsydlc committed
Commit 8b1a262 · verified · 1 Parent(s): dfab415

TensorRT-LLM test

Files changed (1)
  1. config.json +57 -0
config.json ADDED
@@ -0,0 +1,57 @@
+ {
+   "builder_config": {
+     "gather_context_logits": false,
+     "gather_generation_logits": false,
+     "hidden_act": "silu",
+     "hidden_size": 4096,
+     "int8": false,
+     "max_batch_size": 2,
+     "max_beam_width": 1,
+     "max_input_len": 2048,
+     "max_num_tokens": 4096,
+     "max_output_len": 2048,
+     "max_position_embeddings": 32768,
+     "max_prompt_embedding_table_size": 0,
+     "mlp_hidden_size": 22016,
+     "name": "qwen",
+     "num_heads": 32,
+     "num_kv_heads": 32,
+     "num_layers": 32,
+     "parallel_build": false,
+     "pipeline_parallel": 1,
+     "precision": "float16",
+     "quant_mode": 33,
+     "strongly_typed": false,
+     "tensor_parallel": 1,
+     "use_refit": false,
+     "vocab_size": 151936
+   },
+   "plugin_config": {
+     "attention_qk_half_accumulation": false,
+     "bert_attention_plugin": null,
+     "context_fmha": true,
+     "context_fmha_fp32_acc": false,
+     "enable_xqa": true,
+     "gemm_plugin": "float16",
+     "gpt_attention_plugin": "float16",
+     "identity_plugin": null,
+     "layernorm_quantization_plugin": null,
+     "lookup_plugin": null,
+     "lora_plugin": null,
+     "moe_plugin": null,
+     "multi_block_mode": false,
+     "nccl_plugin": null,
+     "paged_kv_cache": false,
+     "quantize_per_token_plugin": false,
+     "quantize_tensor_plugin": false,
+     "remove_input_padding": true,
+     "rmsnorm_quantization_plugin": null,
+     "smooth_quant_gemm_plugin": null,
+     "tokens_per_block": 128,
+     "use_context_fmha_for_generation": false,
+     "use_custom_all_reduce": false,
+     "use_paged_context_fmha": false,
+     "weight_only_groupwise_quant_matmul_plugin": "float16",
+     "weight_only_quant_matmul_plugin": null
+   }
+ }
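
For reference, below is a minimal sketch of how this build config could be inspected before serving the engine. It only reads the fields shown in the diff above; the interpretation of `quant_mode` as a bitmask (33 = 32 + 1, i.e. per-group INT4 weight-only quantization, consistent with the enabled `weight_only_groupwise_quant_matmul_plugin`) is an assumption based on TensorRT-LLM's `QuantMode` flags and may differ across versions.

```python
import json

# Load the engine build config added in this commit (path is illustrative).
with open("config.json") as f:
    cfg = json.load(f)

build = cfg["builder_config"]
plugins = cfg["plugin_config"]

# These limits are baked into the engine at build time; requests that
# exceed them cannot be served without rebuilding the engine.
print(f"model:          {build['name']} ({build['num_layers']} layers, "
      f"{build['num_heads']} heads, hidden {build['hidden_size']})")
print(f"max batch:      {build['max_batch_size']}")
print(f"max input len:  {build['max_input_len']}")
print(f"max output len: {build['max_output_len']}")
print(f"max num tokens: {build['max_num_tokens']}")

# quant_mode is a bitmask. 33 = 0b100001; assuming TensorRT-LLM's QuantMode
# bit layout (INT4_WEIGHTS = 1, PER_GROUP = 32), this engine uses per-group
# INT4 weight-only quantization, which matches the groupwise weight-only
# matmul plugin being enabled in plugin_config.
qm = build["quant_mode"]
print(f"quant_mode:     {qm} (0b{qm:06b})")
print("groupwise weight-only plugin:",
      plugins["weight_only_groupwise_quant_matmul_plugin"])
```

Note that `paged_kv_cache` is false and `max_batch_size` is 2, so this build looks like a small-scale test engine (consistent with the commit message) rather than a throughput-oriented serving build.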