Crystalcareai committed
Commit f1e7d41 · verified · 1 Parent(s): 9218cf1

Upload configuration_quietqwen.py

Files changed (1)
  1. configuration_quietqwen.py +198 -0
configuration_quietqwen.py ADDED
@@ -0,0 +1,198 @@
+ # coding=utf-8
+ # Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """QuietQwen model configuration (based on Qwen2MoE)."""
+
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+ QWEN2MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+     "Qwen/Qwen1.5-MoE-A2.7B": "https://huggingface.co/Qwen/Qwen1.5-MoE-A2.7B/resolve/main/config.json",
+ }
+
+
+ class QuietQwenConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`Qwen2MoeModel`]. It is used to instantiate a
+     Qwen2MoE model according to the specified arguments, defining the model architecture. Instantiating a configuration
+     with the defaults will yield a similar configuration to that of
+     Qwen1.5-MoE-A2.7B [Qwen/Qwen1.5-MoE-A2.7B](https://huggingface.co/Qwen/Qwen1.5-MoE-A2.7B).
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 151936):
+             Vocabulary size of the Qwen2MoE model. Defines the number of different tokens that can be represented by the
+             `input_ids` passed when calling [`Qwen2MoeModel`].
+         hidden_size (`int`, *optional*, defaults to 2048):
+             Dimension of the hidden representations.
+         intermediate_size (`int`, *optional*, defaults to 5632):
+             Dimension of the MLP representations.
+         num_hidden_layers (`int`, *optional*, defaults to 24):
+             Number of hidden layers in the Transformer encoder.
+         num_attention_heads (`int`, *optional*, defaults to 16):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         num_key_value_heads (`int`, *optional*, defaults to 16):
+             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
+             `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+             by meanpooling all the original heads within that group. For more details, check out [this
+             paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `16`.
+         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+             The non-linear activation function (function or string) in the decoder.
+         max_position_embeddings (`int`, *optional*, defaults to 32768):
+             The maximum sequence length that this model might ever be used with.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         rms_norm_eps (`float`, *optional*, defaults to 1e-06):
+             The epsilon used by the rms normalization layers.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+             Whether the model's input and output word embeddings should be tied.
+         rope_theta (`float`, *optional*, defaults to 10000.0):
+             The base period of the RoPE embeddings.
+         use_sliding_window (`bool`, *optional*, defaults to `False`):
+             Whether to use sliding window attention.
+         sliding_window (`int`, *optional*, defaults to 4096):
+             Sliding window attention (SWA) window size. If not specified, will default to `4096`.
+         max_window_layers (`int`, *optional*, defaults to 28):
+             The number of layers that use SWA (Sliding Window Attention). The bottom layers use SWA while the top use full attention.
+         attention_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+         decoder_sparse_step (`int`, *optional*, defaults to 1):
+             The frequency of the MoE layer.
+         moe_intermediate_size (`int`, *optional*, defaults to 1408):
+             Intermediate size of the routed expert.
+         shared_expert_intermediate_size (`int`, *optional*, defaults to 5632):
+             Intermediate size of the shared expert.
+         num_experts_per_tok (`int`, *optional*, defaults to 4):
+             Number of selected experts.
+         num_experts (`int`, *optional*, defaults to 60):
+             Number of routed experts.
+         norm_topk_prob (`bool`, *optional*, defaults to `False`):
+             Whether to normalize the topk probabilities.
+         output_router_logits (`bool`, *optional*, defaults to `False`):
+             Whether or not the router logits should be returned by the model. Enabling this will also
+             allow the model to output the auxiliary loss, including load balancing loss and router z-loss.
+         router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
+             The aux loss factor for the total loss.
+
+     ```python
+     >>> from transformers import Qwen2MoeModel, Qwen2MoeConfig
+
+     >>> # Initializing a Qwen2MoE style configuration
+     >>> configuration = Qwen2MoeConfig()
+
+     >>> # Initializing a model from the Qwen1.5-MoE-A2.7B style configuration
+     >>> model = Qwen2MoeModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "quietqwen"
+     keys_to_ignore_at_inference = ["past_key_values"]
+
+     def __init__(
+         self,
+         vocab_size=151936,
+         hidden_size=2048,
+         intermediate_size=5632,
+         num_hidden_layers=24,
+         num_attention_heads=16,
+         num_key_value_heads=16,
+         hidden_act="silu",
+         max_position_embeddings=32768,
+         initializer_range=0.02,
+         rms_norm_eps=1e-6,
+         use_cache=True,
+         tie_word_embeddings=False,
+         rope_theta=10000.0,
+         use_sliding_window=False,
+         sliding_window=4096,
+         max_window_layers=28,
+         attention_dropout=0.0,
+         decoder_sparse_step=1,
+         moe_intermediate_size=1408,
+         shared_expert_intermediate_size=5632,
+         num_experts_per_tok=4,
+         num_experts=60,
+         norm_topk_prob=False,
+         output_router_logits=False,
+         max_thoughts=16,
+         merged_talk_heads=True,
+         merged_lm_and_talk_heads=False,
+         merged_lm_and_think_heads=True,
+         use_concat_talk_head=True,
+         use_shallow_think=True,
+         use_shallow_talk=False,
+         use_complex_think_head=False,
+         use_complex_talk_head=True,
+         use_weighted_talk_head=True,
+         router_aux_loss_coef=0.001,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.use_sliding_window = use_sliding_window
+         self.sliding_window = sliding_window
+         self.max_window_layers = max_window_layers
+
+         self.num_key_value_heads = num_key_value_heads
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.use_cache = use_cache
+         self.rope_theta = rope_theta
+         self.attention_dropout = attention_dropout
+
+         # MoE arguments
+         self.decoder_sparse_step = decoder_sparse_step
+         self.moe_intermediate_size = moe_intermediate_size
+         self.shared_expert_intermediate_size = shared_expert_intermediate_size
+         self.num_experts_per_tok = num_experts_per_tok
+         self.num_experts = num_experts
+         self.norm_topk_prob = norm_topk_prob
+         self.output_router_logits = output_router_logits
+         self.router_aux_loss_coef = router_aux_loss_coef
+
+         # Quiet-STaR arguments
+         self.max_thoughts = max_thoughts
+         self.merged_talk_heads = merged_talk_heads
+         self.merged_lm_and_talk_heads = merged_lm_and_talk_heads
+         self.merged_lm_and_think_heads = merged_lm_and_think_heads
+         self.use_concat_talk_head = use_concat_talk_head
+         self.use_shallow_think = use_shallow_think
+         self.use_shallow_talk = use_shallow_talk
+         self.use_complex_think_head = use_complex_think_head
+         self.use_complex_talk_head = use_complex_talk_head
+         self.use_weighted_talk_head = use_weighted_talk_head
+
+         super().__init__(
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
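
For reference, a minimal sketch of how this configuration class could be exercised on its own. It assumes the file above is importable as `configuration_quietqwen`; the override values (small layer count, reduced key/value heads, a `max_thoughts` of 8) are purely illustrative and do not correspond to any released checkpoint.

```python
# Minimal usage sketch (assumption: configuration_quietqwen.py is on PYTHONPATH;
# the override values below are illustrative only).
from configuration_quietqwen import QuietQwenConfig

config = QuietQwenConfig(
    num_hidden_layers=4,    # tiny value for a quick smoke test, not a real model size
    num_key_value_heads=4,  # with the default num_attention_heads=16 this exercises the GQA setting
    max_thoughts=8,         # one of the Quiet-STaR fields defined in this config
)

print(config.model_type)                                        # "quietqwen"
print(config.num_attention_heads, config.num_key_value_heads)   # 16 4
print(config.max_thoughts)                                      # 8

# PretrainedConfig provides JSON round-tripping for free.
config.save_pretrained("./quietqwen-config")
reloaded = QuietQwenConfig.from_pretrained("./quietqwen-config")
assert reloaded.max_thoughts == config.max_thoughts
```

If the class is meant to be resolved through `AutoConfig`/`AutoModel`, it would additionally need `AutoConfig.register("quietqwen", QuietQwenConfig)` or an `auto_map` entry in `config.json`; that wiring is outside this file.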