jjw0126 committed · verified
Commit 4da77fb · 1 parent: f78105b

Update config.json

Files changed (1): config.json (+5, -7)
config.json CHANGED

@@ -1,11 +1,11 @@
 {
   "architectures": [
-    "EdgellmForCausalLM"
+    "PLMForCausalLM"
   ],
   "auto_map": {
-    "AutoConfig": "configuration_edgellm.EdgellmConfig",
-    "AutoModel": "modeling_edgellm.EdgellmModel",
-    "AutoModelForCausalLM": "modeling_edgellm.EdgellmForCausalLM"
+    "AutoConfig": "configuration_plm.PLMConfig",
+    "AutoModel": "modeling_plm.PLMModel",
+    "AutoModelForCausalLM": "modeling_plm.PLMForCausalLM"
   },
   "attention_bias": false,
   "attention_dropout": 0.0,
@@ -17,7 +17,7 @@
   "intermediate_size": 8192,
   "kv_lora_rank": 512,
   "max_position_embeddings": 4096,
-  "model_type": "edgellm",
+  "model_type": "plm",
   "num_attention_heads": 16,
   "num_key_value_heads": 16,
   "num_hidden_layers": 32,
@@ -28,12 +28,10 @@
   "rope_scaling": null,
   "pretraining_tp": 1,
   "rope_theta": 100000.0,
-  "sliding_window": 4096,
   "tie_word_embeddings": true,
   "torch_dtype": "bfloat16",
   "transformers_version": "4.40.1",
   "use_cache": true,
-  "use_sliding_window": false,
   "v_head_dim": 128,
   "vocab_size": 151936
 }
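
After this change, the auto_map entries point to the PLM custom code files, so the checkpoint has to be loaded with trust_remote_code=True. A minimal loading sketch, assuming a placeholder repository id (the actual repo path is not part of this commit):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Placeholder repository id; substitute the real model repo path.
repo_id = "your-org/plm"

# trust_remote_code=True is required so the auto_map entries
# (configuration_plm.PLMConfig, modeling_plm.PLMForCausalLM) are resolved
# from the repository's custom code files.
tokenizer = AutoTokenizer.from_pretrained(repo_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16" in config.json
    trust_remote_code=True,
)

# Quick smoke test of the loaded model.
inputs = tokenizer("Hello", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))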