Raincleared committed on
Commit 0cef4d2 · verified · 1 parent: e1aefff

initial commit

config.json ADDED
@@ -0,0 +1,46 @@
+ {
+   "_name_or_path": "openbmb/CPM-2B",
+   "architectures": [
+     "MiniCPMForCausalLM"
+   ],
+   "auto_map": {
+     "AutoConfig": "configuration_minicpm.MiniCPMConfig",
+     "AutoModel": "modeling_minicpm.MiniCPMModel",
+     "AutoModelForCausalLM": "modeling_minicpm.MiniCPMForCausalLM",
+     "AutoModelForSeq2SeqLM": "modeling_minicpm.MiniCPMForCausalLM",
+     "AutoModelForSequenceClassification": "modeling_minicpm.MiniCPMForSequenceClassification"
+   },
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 768,
+   "initializer_range": 0.1,
+   "intermediate_size": 5760,
+   "max_position_embeddings": 4096,
+   "num_attention_heads": 6,
+   "num_hidden_layers": 16,
+   "num_key_value_heads": 32,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.36.0",
+   "use_cache": true,
+   "vocab_size": 122753,
+   "scale_emb": 12,
+   "dim_model_base": 256,
+   "scale_depth": 1.4,
+   "tie_word_embeddings": false,
+   "norm_after_router": "rms",
+   "norm_scale": 0.2,
+   "attention_type": "mla",
+   "q_lora_rank": 768,
+   "kv_lora_rank": 256,
+   "qk_nope_head_dim": 128,
+   "qk_rope_head_dim": 64,
+   "v_head_dim": 128,
+   "ffn_type": "block",
+   "ffn_gated": true,
+   "router_act": "relu",
+   "expert_size": 64,
+   "num_experts": 48
+ }
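
The auto_map entries above let transformers resolve this repo's custom configuration and model classes at load time. A minimal loading sketch, assuming the files live in a local directory or a Hub repo ("path/to/this/repo" is a placeholder, not the actual repo id):

# Hedged sketch: load the custom config via auto_map (requires trust_remote_code).
from transformers import AutoConfig

config = AutoConfig.from_pretrained("path/to/this/repo", trust_remote_code=True)
print(config.attention_type)  # "mla"
print(config.num_experts)     # 48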
configuration_minicpm.py ADDED
@@ -0,0 +1,231 @@
+ # coding=utf-8
+ # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
+ #
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+ # and OPT implementations in this library. It has been modified from its
+ # original forms to accommodate minor architectural differences compared
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ MiniCPM model configuration"""
+
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+ MINICPM_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
+
+
+ class MiniCPMConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`MiniCPMModel`]. It is used to instantiate a MiniCPM
+     model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+     defaults will yield a similar configuration to that of the MiniCPM-7B.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 32000):
+             Vocabulary size of the MiniCPM model. Defines the number of different tokens that can be represented by the
+             `inputs_ids` passed when calling [`MiniCPMModel`].
+         hidden_size (`int`, *optional*, defaults to 4096):
+             Dimension of the hidden representations.
+         intermediate_size (`int`, *optional*, defaults to 11008):
+             Dimension of the MLP representations.
+         num_hidden_layers (`int`, *optional*, defaults to 32):
+             Number of hidden layers in the Transformer decoder.
+         num_attention_heads (`int`, *optional*, defaults to 32):
+             Number of attention heads for each attention layer in the Transformer decoder.
+         num_key_value_heads (`int`, *optional*):
+             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
+             `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+             by meanpooling all the original heads within that group. For more details check out [this
+             paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
+             `num_attention_heads`.
+         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+             The non-linear activation function (function or string) in the decoder.
+         max_position_embeddings (`int`, *optional*, defaults to 2048):
+             The maximum sequence length that this model might ever be used with. MiniCPM 1 supports up to 2048 tokens,
+             MiniCPM 2 up to 4096, CodeMiniCPM up to 16384.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         rms_norm_eps (`float`, *optional*, defaults to 1e-06):
+             The epsilon used by the rms normalization layers.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+         pad_token_id (`int`, *optional*):
+             Padding token id.
+         bos_token_id (`int`, *optional*, defaults to 1):
+             Beginning of stream token id.
+         eos_token_id (`int`, *optional*, defaults to 2):
+             End of stream token id.
+         pretraining_tp (`int`, *optional*, defaults to 1):
+             Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
+             document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is
+             necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
+             issue](https://github.com/pytorch/pytorch/issues/76232).
+         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+             Whether to tie weight embeddings.
+         rope_theta (`float`, *optional*, defaults to 10000.0):
+             The base period of the RoPE embeddings.
+         rope_scaling (`Dict`, *optional*):
+             Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
+             strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
+             `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
+             `max_position_embeddings` to the expected new maximum. See the following thread for more information on how
+             these scaling strategies behave:
+             https://www.reddit.com/r/LocalMiniCPM/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
+             experimental feature, subject to breaking API changes in future versions.
+         attention_bias (`bool`, *optional*, defaults to `False`):
+             Whether to use a bias in the query, key, value and output projection layers during self-attention.
+         attention_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+     """
+
+     model_type = "minicpm"
+     keys_to_ignore_at_inference = ["past_key_values"]
+
+     def __init__(
+         self,
+         vocab_size=32000,
+         hidden_size=4096,
+         intermediate_size=11008,
+         num_hidden_layers=32,
+
+         attention_type="vanilla",
+         num_attention_heads=32,
+         num_key_value_heads=None,
+         qk_nope_head_dim=64,
+         qk_rope_head_dim=32,
+         q_lora_rank=768,
+         kv_lora_rank=256,
+         v_head_dim=None,
+
+         ffn_type="vanilla",
+         ffn_gated=True,
+         hidden_act="silu",
+         router_act="relu",
+         expert_size=128,
+         num_experts=40,
+         moe_routing_strategy="topk",
+         num_shared_experts=0,
+         moe_top_k=2,
+         moe_top_p=0.3,
+         norm_after_router="sum",
+         norm_scale=1.0,
+
+         max_position_embeddings=4096,
+         initializer_range=0.02,
+         rms_norm_eps=1e-6,
+         use_cache=True,
+         pad_token_id=None,
+         bos_token_id=1,
+         eos_token_id=2,
+         pretraining_tp=1,
+         tie_word_embeddings=True,
+         rope_theta=10000.0,
+         rope_scaling=None,
+         attention_bias=False,
+         attention_dropout=0.0,
+         scale_emb=1,
+         dim_model_base=1,
+         scale_depth=1,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+
+         self.attention_type = attention_type
+         self.num_attention_heads = num_attention_heads
+         # for backward compatibility
+         if num_key_value_heads is None:
+             num_key_value_heads = num_attention_heads
+         self.qk_nope_head_dim = qk_nope_head_dim
+         self.qk_rope_head_dim = qk_rope_head_dim
+         self.q_lora_rank = q_lora_rank
+         self.kv_lora_rank = kv_lora_rank
+         if v_head_dim is None:
+             v_head_dim = qk_nope_head_dim
+         self.v_head_dim = v_head_dim
+         self.num_key_value_heads = num_key_value_heads
+
+         self.ffn_type = ffn_type
+         self.ffn_gated = ffn_gated
+         self.hidden_act = hidden_act
+         self.router_act = router_act
+         self.expert_size = expert_size
+         self.num_experts = num_experts
+
+         self.moe_top_k = moe_top_k
+         self.moe_top_p = moe_top_p
+         self.moe_routing_strategy = moe_routing_strategy
+         self.num_shared_experts = num_shared_experts
+         self.norm_after_router = norm_after_router
+         self.norm_scale = norm_scale
+
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.pretraining_tp = pretraining_tp
+         self.use_cache = use_cache
+         self.rope_theta = rope_theta
+         self.rope_scaling = rope_scaling
+         self._rope_scaling_validation()
+         self.attention_bias = attention_bias
+         self.attention_dropout = attention_dropout
+         self.scale_emb = scale_emb
+         self.dim_model_base = dim_model_base
+         self.scale_depth = scale_depth
+
+         super().__init__(
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
+         try:
+             import flash_attn
+             self._attn_implementation = "flash_attention_2"
+         except ImportError:
+             pass
+
+     def _rope_scaling_validation(self):
+         """
+         Validate the `rope_scaling` configuration.
+         """
+         if self.rope_scaling is None:
+             return
+
+         if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
+             raise ValueError(
+                 "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
+                 f"got {self.rope_scaling}"
+             )
+         rope_scaling_type = self.rope_scaling.get("type", None)
+         rope_scaling_factor = self.rope_scaling.get("factor", None)
+         if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
+             raise ValueError(
+                 f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
+             )
+         if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
+             raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
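
For reference, the class above can also be instantiated directly; the values below mirror config.json in this commit. A sketch, assuming configuration_minicpm.py is importable (e.g. from the working directory) and transformers is installed:

# Hedged sketch: build the same MLA + MoE configuration as config.json from the class itself.
from configuration_minicpm import MiniCPMConfig

config = MiniCPMConfig(
    vocab_size=122753,
    hidden_size=768,
    intermediate_size=5760,
    num_hidden_layers=16,
    attention_type="mla",
    num_attention_heads=6,
    qk_nope_head_dim=128,
    qk_rope_head_dim=64,
    v_head_dim=128,
    ffn_type="block",
    expert_size=64,
    num_experts=48,
    rope_scaling={"type": "dynamic", "factor": 2.0},  # optional; checked by _rope_scaling_validation
)
print(config.num_key_value_heads)  # 6: falls back to num_attention_heads when not given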
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "do_sample": true,
+   "top_p": 0.8,
+   "temperature": 0.8,
+   "bos_token_id": 1,
+   "eos_token_id": 2
+ }
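
generate() picks these sampling defaults up from the model's generation config automatically; the sketch below passes them explicitly for clarity. The repo path is a placeholder, and loading relies on the custom modeling code in this commit (trust_remote_code=True):

# Hedged sketch: sampling with the defaults from generation_config.json.
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/this/repo")
model = AutoModelForCausalLM.from_pretrained("path/to/this/repo", trust_remote_code=True)

inputs = tokenizer("Hello,", return_tensors="pt")
outputs = model.generate(**inputs, do_sample=True, top_p=0.8, temperature=0.8, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))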
modeling_minicpm.py ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bb74d51116831c3bf65db812c553f94ab0c88dcf97a5bbb37e3504f6d359c530
+ size 1181204
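
This is a Git LFS pointer rather than the SentencePiece model itself; the oid is the SHA-256 of the actual file fetched by LFS. A quick integrity-check sketch for a downloaded tokenizer.model:

# Hedged sketch: verify the downloaded blob against the LFS oid above.
import hashlib

with open("tokenizer.model", "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()
print(digest == "bb74d51116831c3bf65db812c553f94ab0c88dcf97a5bbb37e3504f6d359c530")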
tokenizer_config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "bos_token": {
+     "__type": "AddedToken",
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "clean_up_tokenization_spaces": false,
+   "eos_token": {
+     "__type": "AddedToken",
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": null,
+   "sp_model_kwargs": {},
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": {
+     "__type": "AddedToken",
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
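
With add_bos_token true and add_eos_token false, encoding prepends <s> (id 1) and appends no end-of-stream token. A small sketch, assuming the tokenizer files are loaded from this repo (placeholder path) and sentencepiece is installed:

# Hedged sketch: show the effect of the BOS/EOS settings in tokenizer_config.json.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/this/repo")
ids = tokenizer("hello world")["input_ids"]
print(ids[0] == tokenizer.bos_token_id)   # True: BOS is prepended automatically
print(ids[-1] == tokenizer.eos_token_id)  # False: no EOS appended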