YYT-t committed on
Commit
9938926
·
verified ·
1 Parent(s): f097ec1

Upload folder using huggingface_hub

Browse files
config.json CHANGED
@@ -1,27 +1,34 @@
1
  {
2
- "_name_or_path": "mistralai/Mistral-7B-Instruct-v0.3",
3
  "architectures": [
4
- "MistralForCausalLM"
5
  ],
 
6
  "attention_dropout": 0.0,
7
- "bos_token_id": 1,
8
- "eos_token_id": 2,
9
  "head_dim": 128,
10
  "hidden_act": "silu",
11
  "hidden_size": 4096,
12
  "initializer_range": 0.02,
13
- "intermediate_size": 14336,
14
- "max_position_embeddings": 32768,
15
- "model_type": "mistral",
 
16
  "num_attention_heads": 32,
17
  "num_hidden_layers": 32,
18
- "num_key_value_heads": 8,
19
- "rms_norm_eps": 1e-05,
20
- "rope_theta": 1000000.0,
21
- "sliding_window": null,
 
 
 
 
 
22
  "tie_word_embeddings": false,
23
  "torch_dtype": "bfloat16",
24
  "transformers_version": "4.45.2",
25
  "use_cache": false,
26
- "vocab_size": 32768
27
  }
 
1
  {
2
+ "_name_or_path": "deepseek-ai/deepseek-coder-6.7b-instruct",
3
  "architectures": [
4
+ "LlamaForCausalLM"
5
  ],
6
+ "attention_bias": false,
7
  "attention_dropout": 0.0,
8
+ "bos_token_id": 32013,
9
+ "eos_token_id": 32021,
10
  "head_dim": 128,
11
  "hidden_act": "silu",
12
  "hidden_size": 4096,
13
  "initializer_range": 0.02,
14
+ "intermediate_size": 11008,
15
+ "max_position_embeddings": 16384,
16
+ "mlp_bias": false,
17
+ "model_type": "llama",
18
  "num_attention_heads": 32,
19
  "num_hidden_layers": 32,
20
+ "num_key_value_heads": 32,
21
+ "pretraining_tp": 1,
22
+ "rms_norm_eps": 1e-06,
23
+ "rope_scaling": {
24
+ "factor": 4.0,
25
+ "rope_type": "linear",
26
+ "type": "linear"
27
+ },
28
+ "rope_theta": 100000,
29
  "tie_word_embeddings": false,
30
  "torch_dtype": "bfloat16",
31
  "transformers_version": "4.45.2",
32
  "use_cache": false,
33
+ "vocab_size": 32256
34
  }
generation_config.json CHANGED
@@ -1,6 +1,6 @@
1
  {
2
  "_from_model_config": true,
3
- "bos_token_id": 1,
4
- "eos_token_id": 2,
5
  "transformers_version": "4.45.2"
6
  }
 
1
  {
2
  "_from_model_config": true,
3
+ "bos_token_id": 32013,
4
+ "eos_token_id": 32021,
5
  "transformers_version": "4.45.2"
6
  }
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:ef8f1bbb8ea1500aff758e26805a6fb2350fcf5a5364e48334ff23b32275f286
3
- size 4949453792
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6407b124cb65168b87b77e438f5d00031b34663446e5050d4de8cf5b57297202
3
+ size 4941082504
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:c288e45d5d06cc4530453c34a0c15fe041abc531eb43aa58c47b65844b48da24
3
- size 4999819336
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fc7c5c28688017b1e7a7586a16781b1a4c68f51603b62fc78b26cf83f9ca1b3d
3
+ size 4947390880
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:324d1345691939da9278b0b66ffbe640e597b20dcd4cc768fd452c6ae4a65a72
3
- size 4546807800
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b73a6283c459ffd76cdd0d9f3470b1ef03d7c9b7d1b0315dd8e9da2d64200a52
3
+ size 3592585968
model.safetensors.index.json CHANGED
@@ -1,6 +1,6 @@
1
  {
2
  "metadata": {
3
- "total_size": 14496047104
4
  },
5
  "weight_map": {
6
  "lm_head.weight": "model-00003-of-00003.safetensors",
@@ -23,24 +23,24 @@
23
  "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
24
  "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
25
  "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
26
- "model.layers.10.input_layernorm.weight": "model-00002-of-00003.safetensors",
27
- "model.layers.10.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
28
  "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
29
  "model.layers.10.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
30
- "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
31
  "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
32
  "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
33
  "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
34
  "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
35
  "model.layers.11.input_layernorm.weight": "model-00002-of-00003.safetensors",
36
  "model.layers.11.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
37
- "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
38
  "model.layers.11.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
39
  "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
40
- "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
41
- "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
42
- "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
43
- "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
44
  "model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
45
  "model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
46
  "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
@@ -140,24 +140,24 @@
140
  "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
141
  "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
142
  "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
143
- "model.layers.22.input_layernorm.weight": "model-00003-of-00003.safetensors",
144
- "model.layers.22.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
145
- "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
146
- "model.layers.22.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
147
- "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
148
  "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
149
  "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
150
  "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
151
  "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
152
  "model.layers.23.input_layernorm.weight": "model-00003-of-00003.safetensors",
153
  "model.layers.23.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
154
- "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
155
- "model.layers.23.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
156
  "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
157
- "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
158
- "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
159
- "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
160
- "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
161
  "model.layers.24.input_layernorm.weight": "model-00003-of-00003.safetensors",
162
  "model.layers.24.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
163
  "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
 
1
  {
2
  "metadata": {
3
+ "total_size": 13481025536
4
  },
5
  "weight_map": {
6
  "lm_head.weight": "model-00003-of-00003.safetensors",
 
23
  "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
24
  "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
25
  "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
26
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00003.safetensors",
27
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
28
  "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
29
  "model.layers.10.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
30
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
31
  "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
32
  "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
33
  "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
34
  "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
35
  "model.layers.11.input_layernorm.weight": "model-00002-of-00003.safetensors",
36
  "model.layers.11.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
37
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
38
  "model.layers.11.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
39
  "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
40
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
41
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
42
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
43
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
44
  "model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
45
  "model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
46
  "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
 
140
  "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
141
  "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
142
  "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
143
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00003.safetensors",
144
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
145
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
146
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
147
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
148
  "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
149
  "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
150
  "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
151
  "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
152
  "model.layers.23.input_layernorm.weight": "model-00003-of-00003.safetensors",
153
  "model.layers.23.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
154
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
155
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
156
  "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
157
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
158
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
159
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
160
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
161
  "model.layers.24.input_layernorm.weight": "model-00003-of-00003.safetensors",
162
  "model.layers.24.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
163
  "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
special_tokens_map.json CHANGED
@@ -1,24 +1,17 @@
1
  {
2
  "bos_token": {
3
- "content": "<s>",
4
  "lstrip": false,
5
- "normalized": false,
6
  "rstrip": false,
7
  "single_word": false
8
  },
9
  "eos_token": {
10
- "content": "</s>",
11
  "lstrip": false,
12
- "normalized": false,
13
  "rstrip": false,
14
  "single_word": false
15
  },
16
- "pad_token": "</s>",
17
- "unk_token": {
18
- "content": "<unk>",
19
- "lstrip": false,
20
- "normalized": false,
21
- "rstrip": false,
22
- "single_word": false
23
- }
24
  }
 
1
  {
2
  "bos_token": {
3
+ "content": "<|begin▁of▁sentence|>",
4
  "lstrip": false,
5
+ "normalized": true,
6
  "rstrip": false,
7
  "single_word": false
8
  },
9
  "eos_token": {
10
+ "content": "<|EOT|>",
11
  "lstrip": false,
12
+ "normalized": true,
13
  "rstrip": false,
14
  "single_word": false
15
  },
16
+ "pad_token": "<|EOT|>"
 
 
 
 
 
 
 
17
  }
tokenizer.json CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:60c3fc985cbfedcb429d05994efe548bdfecd6a00226fcdc8380c36fd894a3be
3
- size 3671968
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5f59e146b0bd8ac2002497d45d9facd45764807e900c36ff759bd77ec656badb
3
+ size 2289810
tokenizer_config.json CHANGED
The diff for this file is too large to render. See raw diff