Question Answering
Transformers
Safetensors
English
doge
text-generation
custom_code
JingzeShi committed on
Commit
9f515c5
verified
1 Parent(s): 86ec133

Upload DogeForCausalLM

Files changed (4)
  1. config.json +43 -43
  2. configuration_doge.py +228 -228
  3. generation_config.json +7 -7
  4. modeling_doge.py +1198 -0
config.json CHANGED
@@ -1,43 +1,43 @@
- {
-   "_name_or_path": "/root/autodl-tmp/data/Doge-160M-Reason-Distill",
-   "architectures": [
-     "DogeForCausalLM"
-   ],
-   "attention_dropout": 0.0,
-   "auto_map": {
-     "AutoConfig": "configuration_doge.DogeConfig",
-     "AutoModelForCausalLM": "modeling_old_doge.DogeForCausalLM"
-   },
-   "bos_token_id": 0,
-   "dynamic_mask_ratio": 0.0,
-   "eos_token_id": 1,
-   "expert_retrieval_size": 64,
-   "hidden_act": "silu",
-   "hidden_bias": false,
-   "hidden_dropout": 0.0,
-   "hidden_size": 768,
-   "initializer_range": 0.02,
-   "intermediate_size": 1536,
-   "is_moe": false,
-   "max_position_embeddings": 2048,
-   "model_type": "doge",
-   "num_attention_heads": 6,
-   "num_cdmoe_experts": 16348,
-   "num_cdmoe_experts_per_head": 8,
-   "num_cdmoe_heads": 4,
-   "num_hidden_layers": 24,
-   "num_key_value_heads": 3,
-   "pad_token_id": 2,
-   "rms_norm_eps": 1e-06,
-   "rope_scaling": {
-     "factor": 4.0,
-     "original_max_position_embeddings": 2048,
-     "rope_type": "dynamic"
-   },
-   "rope_theta": 10000.0,
-   "tie_word_embeddings": true,
-   "torch_dtype": "float32",
-   "transformers_version": "4.49.0",
-   "use_cache": true,
-   "vocab_size": 32768
- }
 
+ {
+   "_name_or_path": "SmallDoge/Doge-160M-Reanson-Distill",
+   "architectures": [
+     "DogeForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "auto_map": {
+     "AutoConfig": "configuration_doge.DogeConfig",
+     "AutoModelForCausalLM": "modeling_doge.DogeForCausalLM"
+   },
+   "bos_token_id": 0,
+   "dynamic_mask_ratio": 0.0,
+   "eos_token_id": 1,
+   "expert_retrieval_size": 64,
+   "hidden_act": "silu",
+   "hidden_bias": false,
+   "hidden_dropout": 0.0,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 1536,
+   "is_moe": false,
+   "max_position_embeddings": 2048,
+   "model_type": "doge",
+   "num_attention_heads": 6,
+   "num_cdmoe_experts": 16348,
+   "num_cdmoe_experts_per_head": 8,
+   "num_cdmoe_heads": 4,
+   "num_hidden_layers": 24,
+   "num_key_value_heads": 3,
+   "pad_token_id": 2,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": {
+     "factor": 4.0,
+     "original_max_position_embeddings": 2048,
+     "rope_type": "dynamic"
+   },
+   "rope_theta": 10000.0,
+   "tie_word_embeddings": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.48.3",
+   "use_cache": true,
+   "vocab_size": 32768
+ }
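
Because `auto_map` points `AutoConfig` and `AutoModelForCausalLM` at the bundled `configuration_doge.py` and `modeling_doge.py`, this checkpoint must be loaded with `trust_remote_code=True`. A minimal loading sketch; the repo id simply mirrors the `_name_or_path` above and is an assumption, so substitute the actual repository name:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "SmallDoge/Doge-160M-Reanson-Distill"  # assumed from `_name_or_path` above

# trust_remote_code is required because `auto_map` resolves to the custom
# configuration_doge.py / modeling_doge.py files shipped in this repository.
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, trust_remote_code=True)
```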
configuration_doge.py CHANGED
@@ -1,228 +1,228 @@
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+ # This file was automatically generated from src/transformers/models/doge/modular_doge.py.
+ # Do NOT edit this file manually as any edits will be overwritten by the generation of
+ # the file from the modular. If any change should be done, please apply the change to the
+ # modular_doge.py file directly. One of our CI checks enforces this.
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+ # coding=utf-8
+ # Copyright 2024 Jingze Shi and the HuggingFace Inc. team. All rights reserved.
+ #
+ # This code is based on the Wonderful Matrices paper implementation.
+ # The Doge family of small language models is trained by Jingze Shi.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.modeling_rope_utils import rope_config_validation
+
+
+ class DogeConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`DogeModel`]. It is used to instantiate a Doge
+     model according to the specified arguments, defining the model architecture like [SmallDoge/Doge-20M](https://huggingface.co/SmallDoge/Doge-20M).
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 32768):
+             Vocabulary size of the Doge model. Defines the number of different tokens that can be represented by the `input_ids` passed when calling [`DogeModel`].
+         hidden_size (`int`, *optional*, defaults to 1024):
+             Dimension of the hidden representations.
+         intermediate_size (`int`, *optional*, defaults to 2048):
+             Dimension of the MLP representations.
+         num_hidden_layers (`int`, *optional*, defaults to 32):
+             Number of hidden layers in the Transformer decoder.
+         hidden_bias (`bool`, *optional*, defaults to `False`):
+             Whether to use bias in the hidden layers.
+         hidden_dropout (`float`, *optional*, defaults to 0.0):
+             Dropout probability for each sequence transformation and state transformation module.
+         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+             The non-linear activation function (function or string) in the decoder.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         rms_norm_eps (`float`, *optional*, defaults to 1e-06):
+             The epsilon used by the rms normalization layers.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+         bos_token_id (`int`, *optional*, defaults to 0):
+             Beginning of stream token id.
+         eos_token_id (`int`, *optional*, defaults to 1):
+             End of stream token id.
+         pad_token_id (`int`, *optional*, defaults to 2):
+             Padding token id.
+         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+             Whether to tie the word embeddings.
+         max_position_embeddings (`int`, *optional*, defaults to 2048):
+             The maximum sequence length that this model might ever be used with.
+         rope_theta (`float`, *optional*, defaults to 10000.0):
+             The base period of the RoPE embeddings.
+         rope_scaling (`Dict`, *optional*):
+             Dictionary containing the scaling configuration for the RoPE embeddings.
+             NOTE: if you apply a new rope type and expect the model to work on a longer `max_position_embeddings`, we recommend updating this value accordingly.
+             The Doge family of small models uses `{ 'rope_type': 'dynamic', 'factor': 4.0, 'original_max_position_embeddings': 2048 }` as the default value.
+             Expected contents:
+                 `rope_type` (`str`):
+                     The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope', 'llama3'], with 'default' being the original RoPE implementation.
+                 `factor` (`float`, *optional*):
+                     Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings.
+                     In most scaling types, a `factor` of x will enable the model to handle sequences of length x * original maximum pre-trained length.
+                 `original_max_position_embeddings` (`int`, *optional*):
+                     Used with 'dynamic', 'longrope' and 'llama3'.
+                     The original max position embeddings used during pretraining.
+                 `attention_factor` (`float`, *optional*):
+                     Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention computation.
+                     If unspecified, it defaults to the value recommended by the implementation, using the `factor` field to infer the suggested value.
+                 `beta_fast` (`float`, *optional*):
+                     Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
+                     ramp function. If unspecified, it defaults to 32.
+                 `beta_slow` (`float`, *optional*):
+                     Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
+                     ramp function. If unspecified, it defaults to 1.
+                 `short_factor` (`List[float]`, *optional*):
+                     Only used with 'longrope'. The scaling factor to be applied to short contexts (<`original_max_position_embeddings`).
+                     Must be a list of numbers with the same length as the hidden size divided by the number of attention heads divided by 2.
+                 `long_factor` (`List[float]`, *optional*):
+                     Only used with 'longrope'. The scaling factor to be applied to long contexts (>`original_max_position_embeddings`).
+                     Must be a list of numbers with the same length as the hidden size divided by the number of attention heads divided by 2.
+                 `low_freq_factor` (`float`, *optional*):
+                     Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE.
+                 `high_freq_factor` (`float`, *optional*):
+                     Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE.
+         num_attention_heads (`int`, *optional*, defaults to 8):
+             Number of attention heads for each attention layer in the Transformer decoder.
+         num_key_value_heads (`int`, *optional*):
+             This is the number of key_value heads that should be used to implement Grouped Query Attention.
+             If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
+             `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used.
+             When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by mean-pooling all the original heads within that group.
+             For more details, check out [this paper](https://arxiv.org/pdf/2305.13245.pdf).
+             If not specified, it will default to `num_attention_heads`.
+         attention_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+         dynamic_mask_ratio (`float`, *optional*, defaults to 0.0):
+             The ratio to control the proportion of the dynamic mask filled with the minimum value. For more details, check out [this paper](https://arxiv.org/pdf/2412.11834).
+         is_moe (`bool`, *optional*, defaults to `False`):
+             Whether to use the Cross Domain Mixture of Experts; if `True`, the MoE will inherit the MLP to initialize. For more details, check out [this paper](https://arxiv.org/pdf/2412.11834).
+         num_cdmoe_experts (`int`, *optional*, defaults to 16348):
+             Number of Experts for the Cross Domain Mixture of Experts.
+         num_cdmoe_heads (`int`, *optional*, defaults to 4):
+             Number of retrieval heads, used to mix multi-head experts.
+         num_cdmoe_experts_per_head (`int`, *optional*, defaults to 8):
+             Number of Experts per retrieval head, used to mix multi-head experts.
+         expert_retrieval_size (`int`, *optional*, defaults to 64):
+             Dimension of the Expert retrieval states for calculating the dot product of query and key to determine the expert index.
+
+     ```python
+     >>> from transformers import DogeConfig, DogeModel
+
+     >>> # Initializing a Doge-320M style configuration
+     >>> configuration = DogeConfig()
+
+     >>> # Initializing a model from the Doge-320M style configuration
+     >>> model = DogeModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "doge"
+     keys_to_ignore_at_inference = ["past_key_values"]
+     # Default tensor parallel plan for base model `DogeModel`
+     base_model_tp_plan = {
+         "layers.*.self_attn.q_proj": "colwise",
+         "layers.*.self_attn.k_proj": "colwise",
+         "layers.*.self_attn.v_proj": "colwise",
+         "layers.*.self_attn.dt_proj": "rowwise",
+         "layers.*.self_attn.o_proj": "rowwise",
+         "layers.*.mlp.gate_proj": "colwise",
+         "layers.*.mlp.up_proj": "colwise",
+         "layers.*.mlp.down_proj": "rowwise",
+     }
+
+     def __init__(
+         self,
+         vocab_size=32768,
+         hidden_size=1024,
+         intermediate_size=2048,
+         num_hidden_layers=32,
+         hidden_bias=False,
+         hidden_dropout=0.0,
+         hidden_act="silu",
+         initializer_range=0.02,
+         rms_norm_eps=1e-06,
+         use_cache=True,
+         bos_token_id=0,
+         eos_token_id=1,
+         pad_token_id=2,
+         tie_word_embeddings=False,
+         max_position_embeddings=2048,
+         rope_theta=10000.0,
+         rope_scaling=None,
+         num_attention_heads=8,
+         num_key_value_heads=None,
+         attention_dropout=0.0,
+         dynamic_mask_ratio=0.0,
+         is_moe=False,
+         num_cdmoe_experts=16348,
+         num_cdmoe_heads=4,
+         num_cdmoe_experts_per_head=8,
+         expert_retrieval_size=64,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+
+         self.hidden_bias = hidden_bias
+         self.hidden_dropout = hidden_dropout
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.use_cache = use_cache
+
+         self.max_position_embeddings = max_position_embeddings
+         self.rope_theta = rope_theta
+         self.rope_scaling = rope_scaling
+         self.num_attention_heads = num_attention_heads
+         self.num_key_value_heads = num_key_value_heads
+         self.attention_dropout = attention_dropout
+         self.dynamic_mask_ratio = dynamic_mask_ratio
+         self.is_moe = is_moe
+         self.num_cdmoe_experts = num_cdmoe_experts
+         self.num_cdmoe_heads = num_cdmoe_heads
+         self.num_cdmoe_experts_per_head = num_cdmoe_experts_per_head
+         self.expert_retrieval_size = expert_retrieval_size
+
+         # Validate the correctness of rotary position embeddings parameters
+         # BC: if there is a 'type' field, copy it to 'rope_type'.
+         if self.rope_scaling is not None and "type" in self.rope_scaling:
+             self.rope_scaling["rope_type"] = self.rope_scaling["type"]
+         rope_config_validation(self)
+
+         # for backward compatibility
+         if num_key_value_heads is None:
+             self.num_key_value_heads = num_attention_heads
+
+         super().__init__(
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             pad_token_id=pad_token_id,
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
+
+
+ __all__ = ["DogeConfig"]
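
Two backward-compatibility behaviors in `__init__` are worth calling out: a legacy `type` key in `rope_scaling` is copied to `rope_type` before `rope_config_validation` runs, and `num_key_value_heads` falls back to `num_attention_heads` (plain MHA) when left unset. A small sketch, assuming `configuration_doge.py` sits in the working directory:

```python
from configuration_doge import DogeConfig

# Legacy checkpoints may still carry the old 'type' key; it is copied to 'rope_type'.
config = DogeConfig(
    rope_scaling={"type": "dynamic", "factor": 4.0, "original_max_position_embeddings": 2048}
)
assert config.rope_scaling["rope_type"] == "dynamic"

# num_key_value_heads left unset defaults to num_attention_heads (MHA).
assert config.num_key_value_heads == config.num_attention_heads
```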
generation_config.json CHANGED
@@ -1,7 +1,7 @@
- {
-   "_from_model_config": true,
-   "bos_token_id": 0,
-   "eos_token_id": 1,
-   "pad_token_id": 2,
-   "transformers_version": "4.49.0"
- }
 
+ {
+   "_from_model_config": true,
+   "bos_token_id": 0,
+   "eos_token_id": 1,
+   "pad_token_id": 2,
+   "transformers_version": "4.48.3"
+ }
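
With the model and tokenizer loaded as sketched under config.json above, `generate()` reads the `bos`/`eos`/`pad` ids (0/1/2) from this file automatically. An illustrative usage sketch, not part of the commit:

```python
import torch

inputs = tokenizer("Hello, Doge!", return_tensors="pt")
with torch.no_grad():
    # eos_token_id=1 and pad_token_id=2 come from generation_config.json.
    output_ids = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```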
modeling_doge.py ADDED
@@ -0,0 +1,1198 @@
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+ # This file was automatically generated from src/transformers/models/doge/modular_doge.py.
+ # Do NOT edit this file manually as any edits will be overwritten by the generation of
+ # the file from the modular. If any change should be done, please apply the change to the
+ # modular_doge.py file directly. One of our CI checks enforces this.
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+ # coding=utf-8
+ # Copyright 2024 Jingze Shi and the HuggingFace Inc. team. All rights reserved.
+ #
+ # This code is based on the Wonderful Matrices paper implementation.
+ # The Doge family of small language models is trained by Jingze Shi.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import math
+ from typing import Callable, List, Optional, Tuple, Union
+
+ import torch
+ import torch.nn.functional as F
+ from torch import nn
+
+ from transformers.activations import ACT2FN
+ from transformers.cache_utils import Cache, DynamicCache, StaticCache
+ from transformers.generation import GenerationMixin
+ from transformers.modeling_attn_mask_utils import AttentionMaskConverter
+ from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
+ from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS
+ from transformers.modeling_utils import PreTrainedModel
+ from transformers.processing_utils import Unpack
+ from transformers.utils import (
+     LossKwargs,
+     add_start_docstrings,
+     add_start_docstrings_to_model_forward,
+     is_torch_flex_attn_available,
+     logging,
+     replace_return_docstrings,
+ )
+ from .configuration_doge import DogeConfig
+
+ if is_torch_flex_attn_available():
+     from torch.nn.attention.flex_attention import flex_attention
+
+ logger = logging.get_logger(__name__)
+
+ _CONFIG_FOR_DOC = "DogeConfig"
+
+
+ class DogeRMSNorm(nn.Module):
+     def __init__(self, hidden_size, eps=1e-6):
+         """
+         DogeRMSNorm is equivalent to T5LayerNorm
+         """
+         super().__init__()
+         self.weight = nn.Parameter(torch.ones(hidden_size))
+         self.variance_epsilon = eps
+
+     def forward(self, hidden_states):
+         input_dtype = hidden_states.dtype
+         hidden_states = hidden_states.to(torch.float32)
+         variance = hidden_states.pow(2).mean(-1, keepdim=True)
+         hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+         return self.weight * hidden_states.to(input_dtype)
+
+     def extra_repr(self):
+         return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
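
`DogeRMSNorm` is plain RMS normalization: scale the input by the reciprocal root-mean-square of its last dimension, then by a learned weight. A quick sanity check against that closed form (an illustration, not part of the uploaded file):

```python
import torch

norm = DogeRMSNorm(hidden_size=8, eps=1e-6)
x = torch.randn(2, 4, 8)

# Closed form: weight * x / sqrt(mean(x**2, dim=-1) + eps)
ref = norm.weight * (x / torch.sqrt(x.pow(2).mean(-1, keepdim=True) + 1e-6))
assert torch.allclose(norm(x), ref, atol=1e-6)
```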
+
+
+ class DogeResidual(nn.Module):
+     def __init__(self, hidden_size):
+         super().__init__()
+         self.weight = nn.Parameter(torch.ones(hidden_size))
+
+     def forward(self, residual_states, hidden_states):
+         return self.weight * residual_states + hidden_states
+
+     def extra_repr(self):
+         return f"{tuple(self.weight.shape)}"
+
+
+ class DogeRotaryEmbedding(nn.Module):
+     def __init__(self, config: DogeConfig, device=None):
+         super().__init__()
+         # BC: "rope_type" was originally "type"
+         if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
+             self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
+         else:
+             self.rope_type = "default"
+         self.max_seq_len_cached = config.max_position_embeddings
+         self.original_max_seq_len = config.max_position_embeddings
+
+         self.config = config
+         self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
+
+         inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
+         self.register_buffer("inv_freq", inv_freq, persistent=False)
+         self.original_inv_freq = self.inv_freq
+
+     def _dynamic_frequency_update(self, position_ids, device):
+         """
+         Dynamic RoPE layers should recompute `inv_freq` in the following situations:
+         1 - growing beyond the cached sequence length (allow scaling)
+         2 - the current sequence length is in the original scale (avoid losing precision with small sequences)
+         """
+         seq_len = torch.max(position_ids) + 1
+         if seq_len > self.max_seq_len_cached:  # growth
+             inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, seq_len=seq_len)
+             self.register_buffer("inv_freq", inv_freq, persistent=False)  # TODO joao: may break with compilation
+             self.max_seq_len_cached = seq_len
+
+         if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len:  # reset
+             # This .to() is needed if the model has been moved to a device after being initialized (because
+             # the buffer is automatically moved, but not the original copy)
+             self.original_inv_freq = self.original_inv_freq.to(device)
+             self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
+             self.max_seq_len_cached = self.original_max_seq_len
+
+     @torch.no_grad()
+     def forward(self, x, position_ids):
+         if "dynamic" in self.rope_type:
+             self._dynamic_frequency_update(position_ids, device=x.device)
+
+         # Core RoPE block
+         inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
+         position_ids_expanded = position_ids[:, None, :].float()
+         # Force float32 (see https://github.com/huggingface/transformers/pull/29285)
+         device_type = x.device.type
+         device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
+         with torch.autocast(device_type=device_type, enabled=False):
+             freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
+             emb = torch.cat((freqs, freqs), dim=-1)
+             cos = emb.cos()
+             sin = emb.sin()
+
+         # Advanced RoPE types (e.g. yarn) apply a post-processing scaling factor, equivalent to scaling attention
+         cos = cos * self.attention_scaling
+         sin = sin * self.attention_scaling
+
+         return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
+
+
+ def rotate_half(x):
+     """Rotates half the hidden dims of the input."""
+     x1 = x[..., : x.shape[-1] // 2]
+     x2 = x[..., x.shape[-1] // 2 :]
+     return torch.cat((-x2, x1), dim=-1)
+
+
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
+     """Applies Rotary Position Embedding to the query and key tensors.
+
+     Args:
+         q (`torch.Tensor`): The query tensor.
+         k (`torch.Tensor`): The key tensor.
+         cos (`torch.Tensor`): The cosine part of the rotary embedding.
+         sin (`torch.Tensor`): The sine part of the rotary embedding.
+         position_ids (`torch.Tensor`, *optional*):
+             Deprecated and unused.
+         unsqueeze_dim (`int`, *optional*, defaults to 1):
+             The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
+             sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
+             that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
+             k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
+             cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
+             the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
+     Returns:
+         `tuple(torch.Tensor)` comprising the query and key tensors rotated using the Rotary Position Embedding.
+     """
+     cos = cos.unsqueeze(unsqueeze_dim)
+     sin = sin.unsqueeze(unsqueeze_dim)
+     q_embed = (q * cos) + (rotate_half(q) * sin)
+     k_embed = (k * cos) + (rotate_half(k) * sin)
+     return q_embed, k_embed
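
Concretely, `rotate_half` negates and swaps the two halves of the last dimension, and `apply_rotary_pos_emb` broadcasts `cos`/`sin` over the head dimension before mixing. An illustrative check (with cos=1 and sin=0 the rotation is the identity):

```python
import torch

assert torch.equal(
    rotate_half(torch.tensor([1.0, 2.0, 3.0, 4.0])),
    torch.tensor([-3.0, -4.0, 1.0, 2.0]),
)

q = k = torch.randn(1, 2, 5, 8)   # (batch, heads, seq, head_dim)
cos = torch.ones(1, 5, 8)         # identity rotation: cos=1, sin=0
sin = torch.zeros(1, 5, 8)
q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin)  # unsqueeze_dim=1 adds the head dim
assert torch.equal(q_rot, q) and torch.equal(k_rot, k)
```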
+
+
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
+     """
+     This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
+     num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
+     """
+     batch, num_key_value_heads, slen, head_dim = hidden_states.shape
+     if n_rep == 1:
+         return hidden_states
+     hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
+     return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
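
`repeat_kv` is what lets the 3 key/value heads in this checkpoint's config.json serve its 6 attention heads (GQA with 2 groups, head_dim 768 / 6 = 128). A shape check for illustration:

```python
import torch

kv = torch.randn(1, 3, 10, 128)   # (batch, num_key_value_heads, seq, head_dim)
assert repeat_kv(kv, 2).shape == (1, 6, 10, 128)
assert torch.equal(repeat_kv(kv, 1), kv)  # n_rep=1 is a no-op
```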
+
+
+ def eager_attention_forward(
+     module: nn.Module,
+     query: torch.Tensor,
+     key: torch.Tensor,
+     value: torch.Tensor,
+     attention_mask: Optional[torch.Tensor],
+     scaling: float,
+     dropout: float = 0.0,
+     **kwargs,
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
+     key_states = repeat_kv(key, module.num_key_value_groups)
+     value_states = repeat_kv(value, module.num_key_value_groups)
+
+     attn_weights = torch.matmul(query, key_states.transpose(-1, -2)) * scaling
+     if attention_mask is not None:
+         causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
+         attn_weights = attn_weights + causal_mask
+
+     attn_weights = F.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
+     attn_weights = F.dropout(attn_weights, p=dropout, training=module.training)
+     attn_output = torch.matmul(attn_weights, value_states)
+     attn_output = attn_output.transpose(1, 2).contiguous()
+
+     return attn_output, attn_weights
+
+
+ def sdpa_attention_forward(
+     module: nn.Module,
+     query: torch.Tensor,
+     key: torch.Tensor,
+     value: torch.Tensor,
+     attention_mask: Optional[torch.Tensor],
+     dropout: float = 0.0,
+     scaling: Optional[float] = None,
+     is_causal: Optional[bool] = None,
+     **kwargs,
+ ) -> Tuple[torch.Tensor, None]:
+     key = repeat_kv(key, module.num_key_value_groups)
+     value = repeat_kv(value, module.num_key_value_groups)
+
+     causal_mask = attention_mask
+     if attention_mask is not None:
+         causal_mask = causal_mask[:, :, :, : key.shape[-2]]
+
+     # SDPA with memory-efficient backend is bugged with non-contiguous inputs and custom attn_mask for some torch versions
+     # Reference: https://github.com/pytorch/pytorch/issues/112577.
+     query = query.contiguous()
+     key = key.contiguous()
+     value = value.contiguous()
+
+     # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
+     # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
+     if is_causal is None:
+         is_causal = causal_mask is None and query.shape[2] > 1
+
+     # Shapes (e.g. query.shape[2]) are tensors during jit tracing, resulting in `is_causal` being a tensor.
+     # We convert it to a bool for the SDPA kernel that only accepts bools.
+     if torch.jit.is_tracing() and isinstance(is_causal, torch.Tensor):
+         is_causal = is_causal.item()
+
+     # NOTE: As of pytorch 2.5.1, SDPA backward pass of cuDNN is still incorrect, so we disable cuDNN SDPA (see https://github.com/pytorch/pytorch/issues/138581)
+     torch.backends.cuda.enable_cudnn_sdp(False)
+     attn_output = F.scaled_dot_product_attention(
+         query=query,
+         key=key,
+         value=value,
+         attn_mask=causal_mask,
+         dropout_p=dropout,
+         scale=scaling,
+         is_causal=is_causal,
+     )
+     attn_output = attn_output.transpose(1, 2).contiguous()
+
+     return attn_output, None
+
+
+ def flex_attention_forward(
+     module: nn.Module,
+     query: torch.Tensor,
+     key: torch.Tensor,
+     value: torch.Tensor,
+     attention_mask: Optional[torch.Tensor],
+     scaling: Optional[float] = None,
+     is_causal: Optional[bool] = None,
+     softcap: Optional[float] = None,
+     head_mask: Optional[torch.Tensor] = None,
+     **kwargs,
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
+     causal_mask = attention_mask
+     if attention_mask is not None:
+         causal_mask = causal_mask[:, :, :, : key.shape[-2]]
+
+     if is_causal is None:
+         is_causal = causal_mask is None and query.shape[2] > 1
+
+     def causal_mod(score, batch, head, q_idx, kv_idx):
+         if softcap is not None:
+             score = softcap * torch.tanh(score / softcap)
+         if causal_mask is not None:
+             score = score + causal_mask[batch][0][q_idx][kv_idx]
+         if head_mask is not None:
+             score = score + head_mask[batch][head][0][0]
+         return score
+
+     def dynamic_mod(score, batch, head, q_idx, kv_idx):
+         if softcap is not None:
+             score = softcap * torch.tanh(score / softcap)
+         if causal_mask is not None:
+             score = score + causal_mask[batch][head][q_idx][kv_idx]
+         if head_mask is not None:
+             score = score + head_mask[batch][head][0][0]
+         return score
+
+     # TODO: flex_attention: As of pytorch 2.5.1, captured buffers that require grad are not yet supported.
+     # NOTE: So we only use flex_attention in inference mode.
+     mask_mod = causal_mod if is_causal or module.training else dynamic_mod
+
+     attn_output, attention_weights = flex_attention(
+         query=query,
+         key=key,
+         value=value,
+         score_mod=mask_mod,
+         enable_gqa=True,
+         scale=scaling,
+         # Last time checked on PyTorch == 2.5.1: Flex Attention always computes the lse regardless.
+         # For simplification, we thus always return it as no additional computations are introduced.
+         return_lse=True,
+     )
+     # lse is returned in float32
+     attention_weights = attention_weights.to(value.dtype)
+     attn_output = attn_output.transpose(1, 2).contiguous()
+
+     return attn_output, attention_weights
+
+
+ ALL_ATTENTION_FUNCTIONS = {
+     "eager": eager_attention_forward,
+     "sdpa": sdpa_attention_forward,
+     "flex_attention": flex_attention_forward,
+ }
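
The dispatch table can be exercised directly with toy tensors: the callables only read `num_key_value_groups` and `training` from their module argument, so a `SimpleNamespace` stand-in is enough for illustration:

```python
import types
import torch

mod = types.SimpleNamespace(num_key_value_groups=2, training=False)
q = torch.randn(1, 6, 5, 16)      # (batch, heads, seq, head_dim)
k = v = torch.randn(1, 3, 5, 16)  # 3 KV heads, repeated to 6 internally

out, weights = ALL_ATTENTION_FUNCTIONS["eager"](mod, q, k, v, None, scaling=16 ** -0.5)
assert out.shape == (1, 5, 6, 16)      # returned as (batch, seq, heads, head_dim)
assert weights.shape == (1, 6, 5, 5)   # per-head attention probabilities
```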
+
+
+ class DogeDynamicMaskAttention(nn.Module):
+     """Dynamic Mask Attention from 'Wonderful Matrices' paper."""
+
+     def __init__(self, config: DogeConfig, layer_idx: Optional[int] = None):
+         super().__init__()
+         self.config = config
+         self.layer_idx = layer_idx
+         self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
+         self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
+         self.scaling = self.head_dim**-0.5
+         self.attention_dropout = config.attention_dropout
+         self.dynamic_mask_ratio = config.dynamic_mask_ratio
+
+         self.q_proj = nn.Linear(
+             config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.hidden_bias
+         )
+         self.k_proj = nn.Linear(
+             config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.hidden_bias
+         )
+         self.v_proj = nn.Linear(
+             config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.hidden_bias
+         )
+         # dynamic mask for the QK^T attention weights matrix
+         self.A = nn.Parameter(torch.zeros(config.num_attention_heads))
+         self.dt_proj = nn.Linear(
+             config.num_key_value_heads * self.head_dim, config.num_attention_heads, bias=config.hidden_bias
+         )
+         self.o_proj = nn.Linear(
+             config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.hidden_bias
+         )
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         position_embeddings: Tuple[torch.Tensor, torch.Tensor],
+         attention_mask: Optional[torch.Tensor] = None,
+         past_key_value: Optional[Cache] = None,
+         cache_position: Optional[torch.LongTensor] = None,
+         **kwargs,
+     ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+         input_shape = hidden_states.shape[:-1]
+         hidden_shape = (*input_shape, -1, self.head_dim)
+
+         query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
+         key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
+         value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
+
+         cos, sin = position_embeddings
+         query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
+
+         if past_key_value is not None:
+             # sin and cos are specific to RoPE models; cache_position needed for the static cache
+             cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
+             key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+         # calculate dynamic mask from value_states
+         dt_states = self.dt_proj(
+             value_states.transpose(1, 2).reshape(value_states.shape[0], value_states.shape[-2], -1)
+         )
+         dynamic_mask = torch.exp(self.A * F.softplus(dt_states)).transpose(-1, -2)
+         attn_mask = self.prepare_dynamic_mask(
+             hidden_states=hidden_states,
+             dynamic_mask=dynamic_mask,
+             dynamic_mask_ratio=self.dynamic_mask_ratio,
+             attention_mask=attention_mask,
+         )
+
+         attention_interface: Callable = eager_attention_forward
+         if self.config._attn_implementation != "eager":
+             if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
+                 logger.warning_once(
+                     "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
+                     'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
+                 )
+             else:
+                 attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
+
+         attn_output, attn_weights = attention_interface(
+             self,
+             query_states,
+             key_states,
+             value_states,
+             attention_mask=attn_mask,
+             dropout=0.0 if not self.training else self.attention_dropout,
+             scaling=self.scaling,
+             **kwargs,
+         )
+
+         attn_output = attn_output.reshape(*input_shape, -1).contiguous()
+         attn_output = self.o_proj(attn_output)
+         return attn_output, attn_weights
+
+     def prepare_dynamic_mask(
+         self,
+         hidden_states: torch.Tensor,
+         dynamic_mask: torch.Tensor,
+         dynamic_mask_ratio: float = 0.0,
+         attention_mask: Optional[torch.Tensor] = None,
+     ):
+         """
+         Combine `dynamic_mask` with `attention_mask` to generate the final `attn_mask`.
+
+         Args:
+             hidden_states (`torch.Tensor`): The input hidden_states, used to determine the minimum value of the current input precision.
+             dynamic_mask (`torch.Tensor`): dynamic mask of shape `(batch_size, num_heads, key_sequence_length)`.
+             dynamic_mask_ratio (`float`, *optional*): Ratio from 0.0 to 1.0 used to control the proportion of the dynamic mask filled with the minimum value.
+             attention_mask (`torch.Tensor`, *optional*): attention mask of shape `(batch_size, 1, query_sequence_length, key_sequence_length)`.
+         """
+         attn_mask = None
+         if dynamic_mask is not None:
+             attn_mask = dynamic_mask[:, :, None, :]
+             if 0.0 < dynamic_mask_ratio < 1.0:
+                 min_type = torch.finfo(hidden_states.dtype).min
+                 num_dynamic_mask = int(attn_mask.shape[-1] * dynamic_mask_ratio)
+                 if num_dynamic_mask > 0:
+                     rate_value = torch.kthvalue(attn_mask, num_dynamic_mask, dim=-1, keepdim=True).values
+                     attn_mask = attn_mask.masked_fill(attn_mask < rate_value, min_type)
+             if attention_mask is not None:
+                 attn_mask = attn_mask + attention_mask[:, :, :, : attn_mask.shape[-1]]
+         else:
+             attn_mask = attention_mask
+
+         return attn_mask
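
The fill rule in `prepare_dynamic_mask` can be traced in isolation: with `dynamic_mask_ratio = r`, entries of the dynamic mask strictly below the k-th smallest value (k = r * key_len) are pushed to the dtype minimum so softmax zeroes them out; the k-th value itself survives the strict `<` comparison. A standalone sketch mirroring (not calling) the method:

```python
import torch

dynamic_mask = torch.tensor([[[0.1, 0.9, 0.4, 0.2]]])  # (batch, heads, key_len)
attn_mask = dynamic_mask[:, :, None, :]                # insert the query dim
num_masked = int(attn_mask.shape[-1] * 0.5)            # ratio 0.5 -> k = 2
rate_value = torch.kthvalue(attn_mask, num_masked, dim=-1, keepdim=True).values  # 0.2
attn_mask = attn_mask.masked_fill(attn_mask < rate_value, torch.finfo(attn_mask.dtype).min)
# only 0.1 < 0.2 is filled; 0.2, 0.4 and 0.9 survive
```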
+
+
+ class DogeMLP(nn.Module):
+     def __init__(self, config: DogeConfig):
+         super().__init__()
+         self.hidden_dim = config.hidden_size
+         self.intermediate_dim = config.intermediate_size
+         self.act_fn = ACT2FN[config.hidden_act]
+
+         self.gate_proj = nn.Linear(self.hidden_dim, self.intermediate_dim, bias=config.hidden_bias)
+         self.up_proj = nn.Linear(self.hidden_dim, self.intermediate_dim, bias=config.hidden_bias)
+         self.down_proj = nn.Linear(self.intermediate_dim, self.hidden_dim, bias=config.hidden_bias)
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         **kwargs,
+     ) -> torch.Tensor:
+         hidden_states = self.down_proj(self.act_fn(self.gate_proj(hidden_states)) * self.up_proj(hidden_states))
+         return hidden_states
+
+
+ class DogeCDMoE(DogeMLP):
+     """Cross Domain Mixture of Experts from 'Wonderful Matrices' paper."""
+
+     def __init__(self, config: DogeConfig):
+         super().__init__(config)
+         self.hidden_dim = config.hidden_size
+         self.act_fn = ACT2FN[config.hidden_act]
+
+         self.expert_retrieval_dim = config.expert_retrieval_size
+         self.num_cdmoe_experts = config.num_cdmoe_experts
+         self.num_cdmoe_heads = config.num_cdmoe_heads
+         self.num_cdmoe_experts_per_head = config.num_cdmoe_experts_per_head
+         self.num_keys = int(math.sqrt(self.num_cdmoe_experts))
+
+         # queries and keys for retrieval experts
+         self.queries_proj = nn.Linear(self.hidden_dim, self.num_cdmoe_heads * self.expert_retrieval_dim, bias=False)
+         self.keys = nn.Parameter(torch.zeros(self.num_cdmoe_heads, self.expert_retrieval_dim, self.num_keys))
+
+         # experts
+         self.down_embed = nn.Embedding(self.num_cdmoe_experts, self.hidden_dim)
+         self.up_embed = nn.Embedding(self.num_cdmoe_experts, self.hidden_dim)
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         **kwargs,
+     ) -> torch.Tensor:
+         bsz, seq_len, _ = hidden_states.shape
+
+         # get routing weights with queries and keys
+         queries = self.queries_proj(hidden_states)
+         queries = queries.view(2, self.num_cdmoe_heads, bsz * seq_len, -1)
+         keys = self.keys.view(2, self.num_cdmoe_heads, -1, self.num_keys)
+         routing_weights = torch.matmul(queries, keys)
+         routing_weights = routing_weights.transpose(-2, -3).view(2, bsz, seq_len, self.num_cdmoe_heads, self.num_keys)
+
+         # get experts with the highest routing weights
+         (scores_x, scores_y), (indices_x, indices_y) = routing_weights.topk(self.num_cdmoe_experts_per_head, dim=-1)
+         all_scores = scores_x.unsqueeze(-1) + scores_y.unsqueeze(-2)
+         all_scores = all_scores.view(*scores_x.shape[:-1], -1)
+         all_indices = (indices_x.unsqueeze(-1) * self.num_keys) + indices_y.unsqueeze(-2)
+         all_indices = all_indices.view(*indices_x.shape[:-1], -1)
+         scores, pk_indices = all_scores.topk(self.num_cdmoe_experts_per_head, dim=-1)
+         indices = all_indices.gather(-1, pk_indices)
+         down_embed = self.down_embed(indices)
+         up_embed = self.up_embed(indices)
+
+         # mix experts states with cross domain states
+         experts_weights = torch.sum(hidden_states[:, :, None, None, :] * down_embed, dim=-1)
+         experts_weights = self.act_fn(experts_weights) * scores.softmax(dim=-1)
+         experts_states = torch.sum(experts_weights[:, :, :, :, None] * up_embed, dim=(-2, -3))
+         hidden_states = self.down_proj(self.act_fn(self.gate_proj(hidden_states)) * self.up_proj(hidden_states))
+         hidden_states = hidden_states + experts_states
+         return hidden_states
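
`DogeCDMoE` retrieves experts with product keys: each expert index is composed from a row and a column over `num_keys = int(sqrt(num_cdmoe_experts))` sub-keys, so scoring 2 * num_keys sub-keys addresses num_keys² experts. (With the default 16348 experts, `num_keys` truncates to 127, so only 127² = 16129 of the expert embeddings are addressable.) An illustrative sketch of the index composition for one token and one retrieval head:

```python
import math
import torch

num_keys = int(math.sqrt(16348))  # 127 sub-keys per axis
top_k = 8                         # num_cdmoe_experts_per_head

# Toy per-axis routing scores for a single token and retrieval head.
scores_x, indices_x = torch.randn(num_keys).topk(top_k)
scores_y, indices_y = torch.randn(num_keys).topk(top_k)

# Cartesian combination: top_k * top_k candidates, expert id = row * num_keys + col.
all_scores = (scores_x[:, None] + scores_y[None, :]).reshape(-1)
all_indices = (indices_x[:, None] * num_keys + indices_y[None, :]).reshape(-1)
scores, pk = all_scores.topk(top_k)
experts = all_indices.gather(-1, pk)  # final expert ids in [0, num_keys**2)
```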
538
+
539
+
540
+ class DogeDecoderLayer(nn.Module):
541
+ def __init__(self, config: DogeConfig, layer_idx: Optional[int] = None):
542
+ super().__init__()
543
+ self.hidden_dropout = config.hidden_dropout
544
+
545
+ self.pre_layernorm = DogeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
546
+ self.self_attn = DogeDynamicMaskAttention(config=config, layer_idx=layer_idx)
547
+ self.pre_residual = DogeResidual(config.hidden_size)
548
+
549
+ self.post_layernorm = DogeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
550
+ self.feed_forward = DogeMLP(config) if not config.is_moe else DogeCDMoE(config)
551
+ self.post_residual = DogeResidual(config.hidden_size)
552
+
553
+ def forward(
554
+ self,
555
+ hidden_states: torch.Tensor,
556
+ attention_mask: Optional[torch.Tensor] = None,
557
+ position_ids: Optional[torch.LongTensor] = None,
558
+ past_key_value: Optional[Cache] = None,
559
+ output_attentions: Optional[bool] = False,
560
+ use_cache: Optional[bool] = False,
561
+ cache_position: Optional[torch.LongTensor] = None,
562
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC
563
+ **kwargs,
564
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
565
+ # sequence transformation
566
+ residual = hidden_states
567
+ hidden_states = self.pre_layernorm(hidden_states)
568
+ hidden_states, self_attn_weights = self.self_attn(
569
+ hidden_states=hidden_states,
570
+ attention_mask=attention_mask,
571
+ position_ids=position_ids,
572
+ past_key_value=past_key_value,
573
+ output_attentions=output_attentions,
574
+ use_cache=use_cache,
575
+ cache_position=cache_position,
576
+ position_embeddings=position_embeddings,
577
+ **kwargs,
578
+ )
579
+ self_attn_weights = None
580
+ hidden_states = F.dropout(hidden_states, p=self.hidden_dropout, training=self.training)
581
+ hidden_states = self.pre_residual(residual, hidden_states)
582
+
583
+ # state transformation
584
+ residual = hidden_states
585
+ hidden_states = self.post_layernorm(hidden_states)
586
+ hidden_states = self.feed_forward(hidden_states)
587
+ hidden_states = F.dropout(hidden_states, p=self.hidden_dropout, training=self.training)
588
+ hidden_states = self.post_residual(residual, hidden_states)
589
+
590
+ outputs = (hidden_states,)
591
+ if output_attentions:
592
+ outputs += (self_attn_weights,)
593
+
594
+ return outputs
595
+
596
+
+ DOGE_START_DOCSTRING = r"""
+     This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+     library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning
+     heads, etc.).
+
+     This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+     Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general
+     usage and behavior.
+
+     Parameters:
+         config ([`DogeConfig`]):
+             Model configuration class with all the parameters of the model. Initializing with a config file does not
+             load the weights associated with the model, only the configuration. Check out the
+             [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+ """
+
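+ # Editor's usage sketch for the docstring above (illustrative only; the helper
+ # name is hypothetical): constructing from a config yields a randomly
+ # initialized model, while `from_pretrained` is what loads trained weights.
+ def _demo_config_init_vs_pretrained():
+     config = DogeConfig()      # architecture hyperparameters only, no weights
+     model = DogeModel(config)  # randomly initialized
+     # model = DogeModel.from_pretrained("SmallDoge/Doge-20M")  # trained weights
+     return model
+
+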
+ @add_start_docstrings(
+     "The bare Doge Model outputting raw hidden-states without any specific head on top.",
+     DOGE_START_DOCSTRING,
+ )
+ class DogePreTrainedModel(PreTrainedModel):
+     config_class = DogeConfig
+     base_model_prefix = "model"
+     supports_gradient_checkpointing = True
+     _no_split_modules = ["DogeDecoderLayer"]
+     _skip_keys_device_placement = ["past_key_values"]
+     _supports_sdpa = True
+     # _supports_flex_attn = True  # TODO: enable this when flex_attention is fully supported
+     _supports_cache_class = True
+     _supports_quantized_cache = True
+     _supports_static_cache = True
+
+     def _init_weights(self, module):
+         std = self.config.initializer_range
+         if isinstance(module, nn.Linear):
+             module.weight.data.normal_(mean=0.0, std=std)
+             if module.bias is not None:
+                 module.bias.data.zero_()
+         elif isinstance(module, nn.Embedding):
+             module.weight.data.normal_(mean=0.0, std=std)
+             if module.padding_idx is not None:
+                 module.weight.data[module.padding_idx].zero_()
+
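+ # Editor's sketch (illustrative, never called; the helper name is hypothetical)
+ # of the embedding rule in `_init_weights` above: after normal initialization,
+ # the row at `padding_idx` is zeroed so pad tokens contribute a zero embedding.
+ def _demo_padding_row_zeroed():
+     import torch
+
+     emb = torch.nn.Embedding(10, 4, padding_idx=2)
+     emb.weight.data.normal_(mean=0.0, std=0.02)
+     emb.weight.data[emb.padding_idx].zero_()
+     assert torch.all(emb.weight.data[2] == 0)
+
+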
+ DOGE_INPUTS_DOCSTRING = r"""
+     Args:
+         input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+             Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
+             provide it.
+
+             Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+             [`PreTrainedTokenizer.__call__`] for details.
+
+             [What are input IDs?](../glossary#input-ids)
+         attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+             Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+             - 1 for tokens that are **not masked**,
+             - 0 for tokens that are **masked**.
+
+             [What are attention masks?](../glossary#attention-mask)
+
+             Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+             [`PreTrainedTokenizer.__call__`] for details.
+
+             If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
+             `past_key_values`).
+
+             If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
+             and modify it to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
+             information on the default strategy.
+
+             - 1 indicates the head is **not masked**,
+             - 0 indicates the head is **masked**.
+         position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+             Indices of positions of each input sequence token in the position embeddings. Selected in the range
+             `[0, config.n_positions - 1]`.
+
+             [What are position IDs?](../glossary#position-ids)
+         past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
+             Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+             blocks) that can be used to speed up sequential decoding. This typically consists of the
+             `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or
+             `config.use_cache=True`.
+
+             Two formats are allowed:
+             - a [`~cache_utils.Cache`] instance, see our
+               [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache);
+             - a tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors
+               of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`. This is also known as the
+               legacy cache format.
+
+             The model will output the same cache format that is fed as input. If no `past_key_values` are passed,
+             the legacy cache format will be returned.
+
+             If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that
+             don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+             `input_ids` of shape `(batch_size, sequence_length)`.
+         inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+             Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+             This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+             than the model's internal embedding lookup matrix.
+         use_cache (`bool`, *optional*):
+             If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+             (see `past_key_values`).
+         output_attentions (`bool`, *optional*):
+             Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+             returned tensors for more detail.
+         output_hidden_states (`bool`, *optional*):
+             Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+             for more detail.
+         return_dict (`bool`, *optional*):
+             Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+         cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
+             Indices depicting the position of the input sequence tokens in the sequence. Contrarily to
+             `position_ids`, this tensor is not affected by padding. It is used to update the cache in the correct
+             position and to infer the complete sequence length.
+ """
+
+
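+ # Editor's sketch of the core inputs documented above (hypothetical helper and
+ # toy token ids; pad id 2 matches this repo's config): a left-padded batch with
+ # its `attention_mask` and the `position_ids` that generation would derive.
+ def _demo_inputs_construction():
+     import torch
+
+     input_ids = torch.tensor([[2, 2, 5, 7], [4, 9, 6, 8]])  # row 0 is left-padded
+     attention_mask = (input_ids != 2).long()  # 1 = attend, 0 = padding
+     position_ids = (attention_mask.cumsum(-1) - 1).clamp(min=0)
+     assert position_ids.tolist() == [[0, 0, 0, 1], [0, 1, 2, 3]]
+
+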
+ @add_start_docstrings(
+     "The bare Doge Model outputting raw hidden-states without any specific head on top.",
+     DOGE_START_DOCSTRING,
+ )
+ class DogeModel(DogePreTrainedModel):
+     """
+     Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`DogeDecoderLayer`].
+
+     Args:
+         config: DogeConfig
+     """
+
+     def __init__(self, config: DogeConfig):
+         super().__init__(config)
+         self.config = config
+         self.padding_idx = config.pad_token_id
+         self.vocab_size = config.vocab_size
+
+         self.word_embed = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+         self.rotary_emb = DogeRotaryEmbedding(config)
+         self.layers = nn.ModuleList(
+             [DogeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
+         )
+         self.final_layernorm = DogeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+         self.gradient_checkpointing = False
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.word_embed
+
+     def set_input_embeddings(self, value):
+         self.word_embed = value
+
+     @add_start_docstrings_to_model_forward(DOGE_INPUTS_DOCSTRING)
+     def forward(
+         self,
+         input_ids: torch.LongTensor = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         use_cache: Optional[bool] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+         cache_position: Optional[torch.LongTensor] = None,
+         **kwargs,
+     ) -> Union[Tuple, BaseModelOutputWithPast]:
+         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+         output_hidden_states = (
+             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         )
+         use_cache = use_cache if use_cache is not None else self.config.use_cache
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         if (input_ids is None) ^ (inputs_embeds is not None):
+             raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
+
+         if self.gradient_checkpointing and self.training and use_cache:
+             logger.warning_once(
+                 "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+             )
+             use_cache = False
+
+         if inputs_embeds is None:
+             inputs_embeds = self.word_embed(input_ids)
+
+         if use_cache and past_key_values is None:
+             past_key_values = DynamicCache()
+
+         if cache_position is None:
+             past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
+             cache_position = torch.arange(
+                 past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
+             )
+
+         if position_ids is None:
+             position_ids = cache_position.unsqueeze(0)
+
+         causal_mask = self._update_causal_mask(
+             attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
+         )
+
+         hidden_states = inputs_embeds
+
+         # create position embeddings to be shared across the decoder layers
+         position_embeddings = self.rotary_emb(hidden_states, position_ids)
+
+         # decoder layers
+         all_hidden_states = () if output_hidden_states else None
+         all_self_attns = () if output_attentions else None
+
+         for decoder_layer in self.layers[: self.config.num_hidden_layers]:
+             if output_hidden_states:
+                 all_hidden_states += (hidden_states,)
+
+             if self.gradient_checkpointing and self.training:
+                 layer_outputs = self._gradient_checkpointing_func(
+                     decoder_layer.__call__,
+                     hidden_states,
+                     causal_mask,
+                     position_ids,
+                     past_key_values,
+                     output_attentions,
+                     use_cache,
+                     cache_position,
+                     position_embeddings,
+                 )
+             else:
+                 layer_outputs = decoder_layer(
+                     hidden_states,
+                     attention_mask=causal_mask,
+                     position_ids=position_ids,
+                     past_key_value=past_key_values,
+                     output_attentions=output_attentions,
+                     use_cache=use_cache,
+                     cache_position=cache_position,
+                     position_embeddings=position_embeddings,
+                     **kwargs,
+                 )
+
+             hidden_states = layer_outputs[0]
+
+             if output_attentions:
+                 all_self_attns += (layer_outputs[1],)
+
+         hidden_states = self.final_layernorm(hidden_states)
+
+         # add hidden states from the last decoder layer
+         if output_hidden_states:
+             all_hidden_states += (hidden_states,)
+
+         output = BaseModelOutputWithPast(
+             last_hidden_state=hidden_states,
+             past_key_values=past_key_values if use_cache else None,
+             hidden_states=all_hidden_states,
+             attentions=all_self_attns,
+         )
+         return output if return_dict else output.to_tuple()
+
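+     # Editor's note (illustrative, hypothetical numbers): `cache_position` above
+     # tracks absolute positions in the cache during generation. For a 5-token
+     # prompt followed by one decoding step:
+     #   prefill: past_seen_tokens=0, 5 new tokens -> cache_position = [0, 1, 2, 3, 4]
+     #   step 1:  past_seen_tokens=5, 1 new token  -> cache_position = [5]
+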
+     def _update_causal_mask(
+         self,
+         attention_mask: torch.Tensor,
+         input_tensor: torch.Tensor,
+         cache_position: torch.Tensor,
+         past_key_values: Cache,
+         output_attentions: bool,
+     ):
+         # We have to provide the attention_mask so the dynamic mask can be computed
+         past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
+         using_static_cache = isinstance(past_key_values, StaticCache)
+
+         dtype, device = input_tensor.dtype, input_tensor.device
+         sequence_length = input_tensor.shape[1]
+         if using_static_cache:
+             target_length = past_key_values.get_max_cache_shape()
+         else:
+             target_length = (
+                 attention_mask.shape[-1]
+                 if isinstance(attention_mask, torch.Tensor)
+                 else past_seen_tokens + sequence_length + 1
+             )
+
+         # In case the provided `attention_mask` is 2D, we generate a causal mask here (4D).
+         causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
+             attention_mask,
+             sequence_length=sequence_length,
+             target_length=target_length,
+             dtype=dtype,
+             device=device,
+             cache_position=cache_position,
+             batch_size=input_tensor.shape[0],
+         )
+
+         if (
+             self.config._attn_implementation == "sdpa"
+             and attention_mask is not None
+             and attention_mask.device.type in ["cuda", "xpu"]
+             and not output_attentions
+         ):
+             # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
+             # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
+             # Details: https://github.com/pytorch/pytorch/issues/110213
+             min_dtype = torch.finfo(dtype).min
+             causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
+
+         return causal_mask
+
+     @staticmethod
+     def _prepare_4d_causal_attention_mask_with_cache_position(
+         attention_mask: torch.Tensor,
+         sequence_length: int,
+         target_length: int,
+         dtype: torch.dtype,
+         device: torch.device,
+         cache_position: torch.Tensor,
+         batch_size: int,
+         **kwargs,
+     ):
+         """
+         Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
+         `(batch_size, key_value_length)`; if the input `attention_mask` is already 4D, it is returned unchanged.
+
+         Args:
+             attention_mask (`torch.Tensor`):
+                 A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
+                 `(batch_size, 1, query_length, key_value_length)`.
+             sequence_length (`int`):
+                 The sequence length being processed.
+             target_length (`int`):
+                 The target length: when generating with static cache, the mask should be as long as the static
+                 cache, to account for the 0 padding, the part of the cache that is not filled yet.
+             dtype (`torch.dtype`):
+                 The dtype to use for the 4D attention mask.
+             device (`torch.device`):
+                 The device to place the 4D attention mask on.
+             cache_position (`torch.Tensor`):
+                 Indices depicting the position of the input sequence tokens in the sequence.
+             batch_size (`int`):
+                 Batch size.
+         """
+         if attention_mask is not None and attention_mask.dim() == 4:
+             # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
+             causal_mask = attention_mask
+         else:
+             min_dtype = torch.finfo(dtype).min
+             causal_mask = torch.full(
+                 (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
+             )
+             if sequence_length != 1:
+                 causal_mask = torch.triu(causal_mask, diagonal=1)
+             causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
+             causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
+             if attention_mask is not None:
+                 causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
+                 mask_length = attention_mask.shape[-1]
+                 padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
+                 padding_mask = padding_mask == 0
+                 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
+                     padding_mask, min_dtype
+                 )
+
+         return causal_mask
+
+
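+ # Editor's sketch (illustrative; the helper name and toy sizes are hypothetical)
+ # of `DogeModel._prepare_4d_causal_attention_mask_with_cache_position` above: a
+ # 2D padding mask for a 3-token query over 4 key positions becomes a
+ # (batch, 1, query_len, kv_len) additive mask whose blocked entries hold the
+ # dtype minimum.
+ def _demo_4d_causal_mask():
+     import torch
+
+     attention_mask = torch.tensor([[0, 1, 1, 1]])  # first key position is padding
+     mask = DogeModel._prepare_4d_causal_attention_mask_with_cache_position(
+         attention_mask,
+         sequence_length=3,
+         target_length=4,
+         dtype=torch.float32,
+         device=torch.device("cpu"),
+         cache_position=torch.arange(1, 4),  # queries sit at absolute positions 1..3
+         batch_size=1,
+     )
+     assert mask.shape == (1, 1, 3, 4)
+
+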
+ class DogeForCausalLM(DogePreTrainedModel, GenerationMixin):
+     _tied_weights_keys = ["lm_head.weight"]
+     _tp_plan = {"lm_head": "colwise_rep"}
+
+     def __init__(self, config: DogeConfig):
+         super().__init__(config)
+         self.config = config
+         self.model = DogeModel(config)
+         self.vocab_size = config.vocab_size
+         self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.model.word_embed
+
+     def set_input_embeddings(self, value):
+         self.model.word_embed = value
+
+     def get_output_embeddings(self):
+         return self.lm_head
+
+     def set_output_embeddings(self, new_embeddings):
+         self.lm_head = new_embeddings
+
+     def get_decoder(self):
+         return self.model
+
+     def set_decoder(self, decoder):
+         self.model = decoder
+
+     @add_start_docstrings_to_model_forward(DOGE_INPUTS_DOCSTRING)
+     @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
+     def forward(
+         self,
+         input_ids: torch.LongTensor = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         labels: Optional[torch.LongTensor] = None,
+         use_cache: Optional[bool] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+         cache_position: Optional[torch.LongTensor] = None,
+         logits_to_keep: Union[int, torch.Tensor] = 0,
+         **kwargs: Unpack[LossKwargs],
+     ) -> Union[Tuple, CausalLMOutputWithPast]:
+         r"""
+         Args:
+             labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+                 Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+                 config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are
+                 ignored (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+             logits_to_keep (`int` or `torch.Tensor`, *optional*):
+                 If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all
+                 `input_ids` (special case). Only last token logits are needed for generation, and calculating them
+                 only for that token can save memory, which becomes pretty significant for long sequences or large
+                 vocabulary sizes. If a `torch.Tensor`, it must be 1D, holding the indices to keep in the sequence
+                 length dimension. This is useful when using packed tensor format (single dimension for batch and
+                 sequence length).
+
+         Returns:
+
+         Example:
+
+         ```python
+         >>> from transformers import AutoTokenizer, AutoModelForCausalLM
+
+         >>> model = AutoModelForCausalLM.from_pretrained("SmallDoge/Doge-20M")
+         >>> tokenizer = AutoTokenizer.from_pretrained("SmallDoge/Doge-20M")
+
+         >>> prompt = "Hey, are you conscious? Can you talk to me?"
+         >>> inputs = tokenizer(prompt, return_tensors="pt")
+
+         >>> # Generate
+         >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
+         >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+         "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
+         ```"""
+         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+         output_hidden_states = (
+             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         )
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         # decoder output consists of (dec_features, layer_state, dec_hidden, dec_attn)
+         outputs = self.model(
+             input_ids=input_ids,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             past_key_values=past_key_values,
+             inputs_embeds=inputs_embeds,
+             use_cache=use_cache,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+             cache_position=cache_position,
+             **kwargs,
+         )
+
+         hidden_states = outputs[0]
+         # only compute necessary logits, and do not upcast them to float if we are not computing the loss
+         slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
+         logits = self.lm_head(hidden_states[:, slice_indices, :])
+
+         loss = None
+         if labels is not None:
+             loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.vocab_size, **kwargs)
+
+         if not return_dict:
+             output = (logits,) + outputs[1:]
+             return (loss,) + output if loss is not None else output
+
+         return CausalLMOutputWithPast(
+             loss=loss,
+             logits=logits,
+             past_key_values=outputs.past_key_values,
+             hidden_states=outputs.hidden_states,
+             attentions=outputs.attentions,
+         )
+
+
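+ # Editor's sketch (illustrative helper, hypothetical toy sizes) of the
+ # `logits_to_keep` slicing in `DogeForCausalLM.forward` above: with an int n,
+ # only the last n positions are projected through the LM head, which avoids
+ # materializing (batch, seq, vocab) logits during generation.
+ def _demo_logits_to_keep():
+     import torch
+
+     hidden_states = torch.randn(2, 10, 8)  # (batch, seq, hidden)
+     logits_to_keep = 1
+     slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
+     lm_head = torch.nn.Linear(8, 32, bias=False)  # tiny stand-in vocabulary of 32
+     logits = lm_head(hidden_states[:, slice_indices, :])
+     assert logits.shape == (2, 1, 32)
+
+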
+ @add_start_docstrings(
+     """
+     The Doge Model transformer with a sequence classification head on top (linear layer).
+
+     [`DogeForSequenceClassification`] uses the last token in order to do the classification, as other causal models
+     (e.g. GPT-2) do.
+
+     Since it does classification on the last token, it needs to know the position of the last token. If a
+     `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each
+     row. If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot
+     guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (takes the
+     last value in each row of the batch).
+     """,
+     DOGE_START_DOCSTRING,
+ )
+ class DogeForSequenceClassification(DogePreTrainedModel):
+     def __init__(self, config: DogeConfig):
+         super().__init__(config)
+         self.num_labels = config.num_labels
+
+         self.model = DogeModel(config)
+         self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
+         self.config = config
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.model.word_embed
+
+     def set_input_embeddings(self, value):
+         self.model.word_embed = value
+
+     @add_start_docstrings_to_model_forward(DOGE_INPUTS_DOCSTRING)
+     def forward(
+         self,
+         input_ids: Optional[torch.LongTensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         labels: Optional[torch.LongTensor] = None,
+         use_cache: Optional[bool] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+     ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
+         r"""
+         labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+             Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+             config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss);
+             if `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+         """
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         transformer_outputs = self.model(
+             input_ids,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             past_key_values=past_key_values,
+             inputs_embeds=inputs_embeds,
+             use_cache=use_cache,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+         )
+         hidden_states = transformer_outputs[0]
+         logits = self.score(hidden_states)
+
+         if input_ids is not None:
+             batch_size = input_ids.shape[0]
+         else:
+             batch_size = inputs_embeds.shape[0]
+
+         if self.config.pad_token_id is None and batch_size != 1:
+             raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
+         if self.config.pad_token_id is None:
+             last_non_pad_token = -1
+         elif input_ids is not None:
+             # To handle both left- and right-padding, we take the rightmost token that is not equal to pad_token_id
+             non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
+             token_indices = torch.arange(input_ids.shape[-1], device=logits.device)
+             last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
+         else:
+             last_non_pad_token = -1
+             logger.warning_once(
+                 f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
+                 "unexpected if using padding tokens in conjunction with `inputs_embeds`."
+             )
+
+         pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]
+
+         loss = None
+         if labels is not None:
+             loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config)
+
+         if not return_dict:
+             output = (pooled_logits,) + transformer_outputs[1:]
+             return ((loss,) + output) if loss is not None else output
+
+         return SequenceClassifierOutputWithPast(
+             loss=loss,
+             logits=pooled_logits,
+             past_key_values=transformer_outputs.past_key_values,
+             hidden_states=transformer_outputs.hidden_states,
+             attentions=transformer_outputs.attentions,
+         )
+
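+ # Editor's sketch (illustrative helper, toy ids) of the pooling rule in
+ # `DogeForSequenceClassification.forward` above: the rightmost non-pad token of
+ # each row selects which position's logits are classified.
+ def _demo_last_non_pad_pooling():
+     import torch
+
+     pad_token_id = 2
+     input_ids = torch.tensor([[5, 7, 2, 2], [4, 9, 6, 8]])  # row 0 is right-padded
+     non_pad_mask = (input_ids != pad_token_id).int()
+     token_indices = torch.arange(input_ids.shape[-1])
+     last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
+     assert last_non_pad_token.tolist() == [1, 3]
+
+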
+ __all__ = ["DogeForCausalLM", "DogeModel", "DogePreTrainedModel", "DogeForSequenceClassification"]