JingzeShi committed
Commit 1ec988c · verified · 1 Parent(s): ec16999

Upload DogeForCausalLM

Files changed (4)
  1. config.json +46 -46
  2. configuration_doge.py +65 -65
  3. generation_config.json +1 -1
  4. modeling_doge.py +325 -360
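
For reference, a minimal sketch of loading this upload (not part of the commit). The repo id below is a placeholder; point it at this repository on the Hub or a local clone. `trust_remote_code=True` is required because config.json maps `AutoConfig` / `AutoModelForCausalLM` to the uploaded `configuration_doge.py` and `modeling_doge.py` via `auto_map`.

```python
# Sketch only: repo_id is an assumption, not the canonical path of this repository.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "SmallDoge/Doge-160M"  # hypothetical id for this repository

# trust_remote_code=True lets transformers import configuration_doge.DogeConfig
# and modeling_doge.DogeForCausalLM, as declared in config.json's "auto_map".
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, trust_remote_code=True)

inputs = tokenizer("Hey, are you conscious? Can you talk to me?", return_tensors="pt")
out = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```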
config.json CHANGED
@@ -1,46 +1,46 @@
- {
-   "_name_or_path": "./results/Doge-160M/checkpoint-21600",
-   "architectures": [
-     "DogeForCausalLM"
-   ],
-   "attention_dropout": 0.0,
-   "auto_map": {
-     "AutoConfig": "configuration_doge.DogeConfig",
-     "AutoModelForCausalLM": "modeling_doge.DogeForCausalLM"
-   },
-   "bos_token_id": 0,
-   "dynamic_mask_ratio": 0.0,
-   "eos_token_id": 1,
-   "expert_retrieval_size": 64,
-   "hidden_act": "silu",
-   "hidden_bias": false,
-   "hidden_dropout": 0.0,
-   "hidden_size": 768,
-   "initializer_range": 0.02,
-   "intermediate_size": 1536,
-   "is_causal": false,
-   "is_moe": false,
-   "max_position_embeddings": 2048,
-   "model_type": "doge",
-   "num_attention_heads": 6,
-   "num_cdmoe_experts": 16348,
-   "num_cdmoe_experts_per_head": 8,
-   "num_cdmoe_heads": 4,
-   "num_channels": 3,
-   "num_hidden_layers": 24,
-   "num_key_value_heads": 3,
-   "pad_token_id": 2,
-   "patch_size": 16,
-   "rms_norm_eps": 1e-06,
-   "rope_scaling": {
-     "factor": 4.0,
-     "original_max_position_embeddings": 2048,
-     "rope_type": "dynamic"
-   },
-   "rope_theta": 10000.0,
-   "tie_word_embeddings": true,
-   "torch_dtype": "float32",
-   "transformers_version": "4.48.1",
-   "use_cache": true,
-   "vocab_size": 32768
- }

+ {
+   "_name_or_path": "SmallDoge/Doge-160M-checkpoint",
+   "architectures": [
+     "DogeForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "auto_map": {
+     "AutoConfig": "configuration_doge.DogeConfig",
+     "AutoModelForCausalLM": "modeling_doge.DogeForCausalLM"
+   },
+   "bos_token_id": 0,
+   "dynamic_mask_ratio": 0.0,
+   "eos_token_id": 1,
+   "expert_retrieval_size": 64,
+   "hidden_act": "silu",
+   "hidden_bias": false,
+   "hidden_dropout": 0.0,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 1536,
+   "is_causal": false,
+   "is_moe": false,
+   "max_position_embeddings": 2048,
+   "model_type": "doge",
+   "num_attention_heads": 6,
+   "num_cdmoe_experts": 16348,
+   "num_cdmoe_experts_per_head": 8,
+   "num_cdmoe_heads": 4,
+   "num_channels": 3,
+   "num_hidden_layers": 24,
+   "num_key_value_heads": 3,
+   "pad_token_id": 2,
+   "patch_size": 16,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": {
+     "factor": 4.0,
+     "original_max_position_embeddings": 2048,
+     "rope_type": "dynamic"
+   },
+   "rope_theta": 10000.0,
+   "tie_word_embeddings": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.48.3",
+   "use_cache": true,
+   "vocab_size": 32768
+ }
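
A quick sanity check on the attention geometry implied by the values above (a sketch against the JSON, not part of the commit): 768 hidden dimensions over 6 attention heads gives a head dimension of 128, and 6 query heads over 3 key/value heads gives 2-way grouped-query attention.

```python
# Sketch: the path is assumed to be config.json at the root of this repository.
import json

with open("config.json") as f:
    cfg = json.load(f)

head_dim = cfg["hidden_size"] // cfg["num_attention_heads"]               # 768 // 6 = 128
num_kv_groups = cfg["num_attention_heads"] // cfg["num_key_value_heads"]  # 6 // 3 = 2
print(head_dim, num_kv_groups)
```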
configuration_doge.py CHANGED
@@ -1,9 +1,14 @@
1
  # coding=utf-8
2
  # Copyright 2024 Jingze Shi and the HuggingFace Inc. team. All rights reserved.
3
  #
4
  # This code is based on the Wonderful Matrices paper implementation.
5
- #
6
- # https://arxiv.org/abs/2412.11834
7
  #
8
  # Licensed under the Apache License, Version 2.0 (the "License");
9
  # you may not use this file except in compliance with the License.
@@ -16,8 +21,6 @@
16
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17
  # See the License for the specific language governing permissions and
18
  # limitations under the License.
19
- """PyTorch Doge model configuration"""
20
-
21
  from transformers.configuration_utils import PretrainedConfig
22
  from transformers.modeling_rope_utils import rope_config_validation
23
 
@@ -25,7 +28,7 @@ from transformers.modeling_rope_utils import rope_config_validation
25
  class DogeConfig(PretrainedConfig):
26
  r"""
27
  This is the configuration class to store the configuration of a [`DogeModel`]. It is used to instantiate an Doge
28
- model according to the specified arguments, defining the model architecture like [JingzeShi/Doge-20M](https://huggingface.co/JingzeShi/Doge-20M).
29
 
30
  Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
31
  documentation from [`PretrainedConfig`] for more information.
@@ -33,10 +36,6 @@ class DogeConfig(PretrainedConfig):
33
  Args:
34
  vocab_size (`int`, *optional*, defaults to 32768):
35
  Vocabulary size of the Doge model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`DogeModel`]
36
- num_channels (`int`, *optional*, defaults to 3):
37
- Number of channels in the input image.
38
- patch_size (`int`, *optional*, defaults to 16):
39
- Patch size of Vision Transformer Embeddings.
40
  hidden_size (`int`, *optional*, defaults to 1024):
41
  Dimension of the hidden representations.
42
  intermediate_size (`int`, *optional*, defaults to 2048):
@@ -49,25 +48,41 @@ class DogeConfig(PretrainedConfig):
49
  Dropout probability for each sequence transformation and state transformation module.
50
  hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
51
  The non-linear activation function (function or string) in the decoder.
52
  max_position_embeddings (`int`, *optional*, defaults to 2048):
53
  The maximum sequence length that this model might ever be used with.
54
  rope_theta (`float`, *optional*, defaults to 10000.0):
55
  The base period of the RoPE embeddings.
56
  rope_scaling (`Dict`, *optional*):
57
- Dictionary containing the scaling configuration for the RoPE embeddings.
58
  NOTE: if you apply new rope type and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value accordingly.
 
59
  Expected contents:
60
  `rope_type` (`str`):
61
  The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope', 'llama3'], with 'default' being the original RoPE implementation.
62
  `factor` (`float`, *optional*):
63
- Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings.
64
  In most scaling types, a `factor` of x will enable the model to handle sequences of length x * original maximum pre-trained length.
65
  `original_max_position_embeddings` (`int`, *optional*):
66
- Used with 'dynamic', 'longrope' and 'llama3'.
67
  The original max position embeddings used during pretraining.
68
  `attention_factor` (`float`, *optional*):
69
  Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
70
- computation.
71
  If unspecified, it defaults to value recommended by the implementation, using the `factor` field to infer the suggested value.
72
  `beta_fast` (`float`, *optional*):
73
  Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
@@ -76,54 +91,51 @@ class DogeConfig(PretrainedConfig):
76
  Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
77
  ramp function. If unspecified, it defaults to 1.
78
  `short_factor` (`List[float]`, *optional*):
79
- Only used with 'longrope'. The scaling factor to be applied to short contexts (<`original_max_position_embeddings`).
80
  Must be a list of numbers with the same length as the hidden size divided by the number of attention heads divided by 2
81
  `long_factor` (`List[float]`, *optional*):
82
- Only used with 'longrope'. The scaling factor to be applied to long contexts (<`original_max_position_embeddings`).
83
  Must be a list of numbers with the same length as the hidden size divided by the number of attention heads divided by 2
84
  `low_freq_factor` (`float`, *optional*):
85
  Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
86
  `high_freq_factor` (`float`, *optional*):
87
  Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
88
- initializer_range (`float`, *optional*, defaults to 0.02):
89
- The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
90
- rms_norm_eps (`float`, *optional*, defaults to 1e-06):
91
- The epsilon used by the rms normalization layers.
92
- use_cache (`bool`, *optional*, defaults to `True`):
93
- Whether or not the model should return the last key/values attentions (not used by all models). Only
94
- relevant if `config.is_decoder=True`.
95
- pad_token_id (`int`, *optional*, defaults to 0):
96
- Padding token id.
97
- bos_token_id (`int`, *optional*, defaults to 1):
98
- Beginning of stream token id.
99
- eos_token_id (`int`, *optional*, defaults to 2):
100
- End of stream token id.
101
- tie_word_embeddings (`bool`, *optional*, defaults to `True`):
102
- Whether to tie weight embeddings
103
  num_attention_heads (`int`, *optional*, defaults to 8):
104
  Number of attention heads for each attention layer in the Transformer decoder.
105
- num_key_value_heads (`int`, *optional*, defaults to `None`):
106
- This is the number of key_value heads that should be used to implement Grouped Query Attention.
107
  If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
108
- `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used.
109
- When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group.
110
- For more details checkout [this paper](https://arxiv.org/pdf/2305.13245.pdf).
111
  If it is not specified, will default to `num_attention_heads`.
112
  attention_dropout (`float`, *optional*, defaults to 0.0):
113
  The dropout ratio for the attention probabilities.
114
- dynamic_mask_ratio (`float`, *optional*, defaults to 0.0, range [0, 1]):
115
- The ratio to control the proportion of the dynamic mask filled with the minimum value.
116
  is_moe (`bool`, *optional*, defaults to `False`):
117
- Whether to use the Cross Domain Mixture of Experts, if `True`, the MoE will inherit the MLP to initialize
118
  num_cdmoe_experts (`int`, *optional*, defaults to 16348):
119
- Number of Private Experts for the Cross Domain Mixture of Experts. calculation formula: :math:`\text{num_cdmoe_experts} = (32 \times \text{num_cdmoe_heads})^2`
120
  num_cdmoe_heads (`int`, *optional*, defaults to 4):
121
- Number of heads of Private Experts for the Cross Domain Mixture of Experts.
122
  num_cdmoe_experts_per_head (`int`, *optional*, defaults to 8):
123
- Number of Private Experts per head for the Cross Domain Mixture of Experts.
124
  expert_retrieval_size (`int`, *optional*, defaults to 64):
125
- Dimension of the Expert retrieval states for the Cross Domain Mixture of Experts.
126
- """
127
 
128
  model_type = "doge"
129
  keys_to_ignore_at_inference = ["past_key_values"]
@@ -132,7 +144,7 @@ class DogeConfig(PretrainedConfig):
132
  "layers.*.self_attn.q_proj": "colwise",
133
  "layers.*.self_attn.k_proj": "colwise",
134
  "layers.*.self_attn.v_proj": "colwise",
135
- "layers.*.self_attn.dt_proj": "colwise",
136
  "layers.*.self_attn.o_proj": "rowwise",
137
  "layers.*.mlp.gate_proj": "colwise",
138
  "layers.*.mlp.up_proj": "colwise",
@@ -142,33 +154,26 @@ class DogeConfig(PretrainedConfig):
142
  def __init__(
143
  self,
144
  vocab_size=32768,
145
- num_channels=3,
146
- patch_size=16,
147
  hidden_size=1024,
148
  intermediate_size=2048,
149
  num_hidden_layers=32,
150
  hidden_bias=False,
151
  hidden_dropout=0.0,
152
  hidden_act="silu",
153
- max_position_embeddings=2048,
154
- rope_theta=10000.0,
155
- rope_scaling={
156
- "rope_type": "dynamic",
157
- "factor": 4.0,
158
- "original_max_position_embeddings": 2048,
159
- },
160
  initializer_range=0.02,
161
  rms_norm_eps=1e-06,
162
  use_cache=True,
163
  bos_token_id=0,
164
  eos_token_id=1,
165
  pad_token_id=2,
166
- tie_word_embeddings=True,
 
 
 
167
  num_attention_heads=8,
168
  num_key_value_heads=None,
169
  attention_dropout=0.0,
170
  dynamic_mask_ratio=0.0,
171
- is_causal=False,
172
  is_moe=False,
173
  num_cdmoe_experts=16348,
174
  num_cdmoe_heads=4,
@@ -177,29 +182,24 @@ class DogeConfig(PretrainedConfig):
177
  **kwargs,
178
  ):
179
  self.vocab_size = vocab_size
180
- self.num_channels = num_channels
181
- self.patch_size = patch_size
182
  self.hidden_size = hidden_size
183
  self.intermediate_size = intermediate_size
184
  self.num_hidden_layers = num_hidden_layers
 
185
  self.hidden_bias = hidden_bias
186
  self.hidden_dropout = hidden_dropout
187
  self.hidden_act = hidden_act
188
- self.max_position_embeddings = max_position_embeddings
189
- self.rope_theta = rope_theta
190
- self.rope_scaling = rope_scaling
191
  self.initializer_range = initializer_range
192
  self.rms_norm_eps = rms_norm_eps
193
  self.use_cache = use_cache
194
- self.bos_token_id = bos_token_id
195
- self.eos_token_id = eos_token_id
196
- self.pad_token_id = pad_token_id
197
- self.tie_word_embeddings = tie_word_embeddings
198
  self.num_attention_heads = num_attention_heads
199
  self.num_key_value_heads = num_key_value_heads
200
  self.attention_dropout = attention_dropout
201
  self.dynamic_mask_ratio = dynamic_mask_ratio
202
- self.is_causal = is_causal
203
  self.is_moe = is_moe
204
  self.num_cdmoe_experts = num_cdmoe_experts
205
  self.num_cdmoe_heads = num_cdmoe_heads
 
1
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
2
+ # This file was automatically generated from src/transformers/models/doge/modular_doge.py.
3
+ # Do NOT edit this file manually as any edits will be overwritten by the generation of
4
+ # the file from the modular. If any change should be done, please apply the change to the
5
+ # modular_doge.py file directly. One of our CI enforces this.
6
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
7
  # coding=utf-8
8
  # Copyright 2024 Jingze Shi and the HuggingFace Inc. team. All rights reserved.
9
  #
10
  # This code is based on the Wonderful Matrices paper implementation.
11
+ # The Doge family of small language models is trained by Jingze Shi.
 
12
  #
13
  # Licensed under the Apache License, Version 2.0 (the "License");
14
  # you may not use this file except in compliance with the License.
 
21
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
22
  # See the License for the specific language governing permissions and
23
  # limitations under the License.
 
 
24
  from transformers.configuration_utils import PretrainedConfig
25
  from transformers.modeling_rope_utils import rope_config_validation
26
 
 
28
  class DogeConfig(PretrainedConfig):
29
  r"""
30
  This is the configuration class to store the configuration of a [`DogeModel`]. It is used to instantiate an Doge
31
+ model according to the specified arguments, defining the model architecture like [SmallDoge/Doge-20M](https://huggingface.co/SmallDoge/Doge-20M).
32
 
33
  Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
34
  documentation from [`PretrainedConfig`] for more information.
 
36
  Args:
37
  vocab_size (`int`, *optional*, defaults to 32768):
38
  Vocabulary size of the Doge model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`DogeModel`]
 
 
 
 
39
  hidden_size (`int`, *optional*, defaults to 1024):
40
  Dimension of the hidden representations.
41
  intermediate_size (`int`, *optional*, defaults to 2048):
 
48
  Dropout probability for each sequence transformation and state transformation module.
49
  hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
50
  The non-linear activation function (function or string) in the decoder.
51
+ initializer_range (`float`, *optional*, defaults to 0.02):
52
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
53
+ rms_norm_eps (`float`, *optional*, defaults to 1e-06):
54
+ The epsilon used by the rms normalization layers.
55
+ use_cache (`bool`, *optional*, defaults to `True`):
56
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
57
+ relevant if `config.is_decoder=True`.
58
+ bos_token_id (`int`, *optional*, defaults to 0):
59
+ Beginning of stream token id.
60
+ eos_token_id (`int`, *optional*, defaults to 1):
61
+ End of stream token id.
62
+ pad_token_id (`int`, *optional*, defaults to 2):
63
+ Padding token id.
64
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
65
+ Whether to tie weight embeddings
66
  max_position_embeddings (`int`, *optional*, defaults to 2048):
67
  The maximum sequence length that this model might ever be used with.
68
  rope_theta (`float`, *optional*, defaults to 10000.0):
69
  The base period of the RoPE embeddings.
70
  rope_scaling (`Dict`, *optional*):
71
+ Dictionary containing the scaling configuration for the RoPE embeddings.
72
  NOTE: if you apply new rope type and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value accordingly.
73
+ Doge family of small models use `{ 'rope_type': 'dynamic', 'factor': 4.0, 'original_max_position_embeddings': 2048 }` as the default value.
74
  Expected contents:
75
  `rope_type` (`str`):
76
  The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope', 'llama3'], with 'default' being the original RoPE implementation.
77
  `factor` (`float`, *optional*):
78
+ Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings.
79
  In most scaling types, a `factor` of x will enable the model to handle sequences of length x * original maximum pre-trained length.
80
  `original_max_position_embeddings` (`int`, *optional*):
81
+ Used with 'dynamic', 'longrope' and 'llama3'.
82
  The original max position embeddings used during pretraining.
83
  `attention_factor` (`float`, *optional*):
84
  Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
85
+ computation.
86
  If unspecified, it defaults to value recommended by the implementation, using the `factor` field to infer the suggested value.
87
  `beta_fast` (`float`, *optional*):
88
  Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
 
91
  Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
92
  ramp function. If unspecified, it defaults to 1.
93
  `short_factor` (`List[float]`, *optional*):
94
+ Only used with 'longrope'. The scaling factor to be applied to short contexts (<`original_max_position_embeddings`).
95
  Must be a list of numbers with the same length as the hidden size divided by the number of attention heads divided by 2
96
  `long_factor` (`List[float]`, *optional*):
97
+ Only used with 'longrope'. The scaling factor to be applied to long contexts (<`original_max_position_embeddings`).
98
  Must be a list of numbers with the same length as the hidden size divided by the number of attention heads divided by 2
99
  `low_freq_factor` (`float`, *optional*):
100
  Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
101
  `high_freq_factor` (`float`, *optional*):
102
  Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
103
  num_attention_heads (`int`, *optional*, defaults to 8):
104
  Number of attention heads for each attention layer in the Transformer decoder.
105
+ num_key_value_heads (`int`, *optional*):
106
+ This is the number of key_value heads that should be used to implement Grouped Query Attention.
107
  If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
108
+ `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used.
109
+ When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group.
110
+ For more details checkout [this paper](https://arxiv.org/pdf/2305.13245.pdf).
111
  If it is not specified, will default to `num_attention_heads`.
112
  attention_dropout (`float`, *optional*, defaults to 0.0):
113
  The dropout ratio for the attention probabilities.
114
+ dynamic_mask_ratio (`float`, *optional*, defaults to 0.0):
115
+ The ratio to control the proportion of the dynamic mask filled with the minimum value. For more details checkout [this paper](https://arxiv.org/pdf/2412.11834).
116
  is_moe (`bool`, *optional*, defaults to `False`):
117
+ Whether to use the Cross Domain Mixture of Experts, if `True`, the MoE will inherit the MLP to initialize. For more details checkout [this paper](https://arxiv.org/pdf/2412.11834).
118
  num_cdmoe_experts (`int`, *optional*, defaults to 16348):
119
+ Number of Experts for the Cross Domain Mixture of Experts.
120
  num_cdmoe_heads (`int`, *optional*, defaults to 4):
121
+ Number of retrieval heads, used to mix multi-head experts.
122
  num_cdmoe_experts_per_head (`int`, *optional*, defaults to 8):
123
+ Number of Experts per retrieval head, used to mix multi-head experts.
124
  expert_retrieval_size (`int`, *optional*, defaults to 64):
125
+ Dimension of the Expert retrieval states for calculating the dot product of query and key to determine the expert index.
126
+
127
+ ```python
128
+ >>> from transformers import DogeConfig, DogeModel
129
+
130
+ >>> # Initializing a Doge-320M style configuration
131
+ >>> configuration = DogeConfig()
132
+
133
+ >>> # Initializing a model from the Doge-320M style configuration
134
+ >>> model = DogeModel(configuration)
135
+
136
+ >>> # Accessing the model configuration
137
+ >>> configuration = model.config
138
+ ```"""
139
 
140
  model_type = "doge"
141
  keys_to_ignore_at_inference = ["past_key_values"]
 
144
  "layers.*.self_attn.q_proj": "colwise",
145
  "layers.*.self_attn.k_proj": "colwise",
146
  "layers.*.self_attn.v_proj": "colwise",
147
+ "layers.*.self_attn.dt_proj": "rowwise",
148
  "layers.*.self_attn.o_proj": "rowwise",
149
  "layers.*.mlp.gate_proj": "colwise",
150
  "layers.*.mlp.up_proj": "colwise",
 
154
  def __init__(
155
  self,
156
  vocab_size=32768,
 
 
157
  hidden_size=1024,
158
  intermediate_size=2048,
159
  num_hidden_layers=32,
160
  hidden_bias=False,
161
  hidden_dropout=0.0,
162
  hidden_act="silu",
163
  initializer_range=0.02,
164
  rms_norm_eps=1e-06,
165
  use_cache=True,
166
  bos_token_id=0,
167
  eos_token_id=1,
168
  pad_token_id=2,
169
+ tie_word_embeddings=False,
170
+ max_position_embeddings=2048,
171
+ rope_theta=10000.0,
172
+ rope_scaling=None,
173
  num_attention_heads=8,
174
  num_key_value_heads=None,
175
  attention_dropout=0.0,
176
  dynamic_mask_ratio=0.0,
 
177
  is_moe=False,
178
  num_cdmoe_experts=16348,
179
  num_cdmoe_heads=4,
 
182
  **kwargs,
183
  ):
184
  self.vocab_size = vocab_size
 
 
185
  self.hidden_size = hidden_size
186
  self.intermediate_size = intermediate_size
187
  self.num_hidden_layers = num_hidden_layers
188
+
189
  self.hidden_bias = hidden_bias
190
  self.hidden_dropout = hidden_dropout
191
  self.hidden_act = hidden_act
 
 
 
192
  self.initializer_range = initializer_range
193
  self.rms_norm_eps = rms_norm_eps
194
  self.use_cache = use_cache
195
+
196
+ self.max_position_embeddings = max_position_embeddings
197
+ self.rope_theta = rope_theta
198
+ self.rope_scaling = rope_scaling
199
  self.num_attention_heads = num_attention_heads
200
  self.num_key_value_heads = num_key_value_heads
201
  self.attention_dropout = attention_dropout
202
  self.dynamic_mask_ratio = dynamic_mask_ratio
 
203
  self.is_moe = is_moe
204
  self.num_cdmoe_experts = num_cdmoe_experts
205
  self.num_cdmoe_heads = num_cdmoe_heads
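
Two constructor defaults change in this file: `rope_scaling` now defaults to `None` (instead of the dynamic-scaling dict) and `tie_word_embeddings` now defaults to `False` (instead of `True`). A config equivalent to the shipped config.json therefore passes both explicitly — a sketch, assuming `configuration_doge.py` is importable from a local clone of this repository:

```python
# Sketch only; requires transformers to be installed and configuration_doge.py on sys.path.
from configuration_doge import DogeConfig

config = DogeConfig(
    hidden_size=768,
    intermediate_size=1536,
    num_hidden_layers=24,
    num_attention_heads=6,
    num_key_value_heads=3,
    max_position_embeddings=2048,
    rope_theta=10000.0,
    # No longer the default: pass the dynamic RoPE scaling used by the checkpoint.
    rope_scaling={"rope_type": "dynamic", "factor": 4.0, "original_max_position_embeddings": 2048},
    # No longer the default: the 160M checkpoint ties input and output embeddings.
    tie_word_embeddings=True,
)
print(config.rope_scaling, config.tie_word_embeddings)
```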
generation_config.json CHANGED
@@ -3,5 +3,5 @@
   "bos_token_id": 0,
   "eos_token_id": 1,
   "pad_token_id": 2,
-  "transformers_version": "4.48.1"
+  "transformers_version": "4.48.3"
 }
modeling_doge.py CHANGED
@@ -1,9 +1,14 @@
1
  # coding=utf-8
2
  # Copyright 2024 Jingze Shi and the HuggingFace Inc. team. All rights reserved.
3
  #
4
  # This code is based on the Wonderful Matrices paper implementation.
5
- #
6
- # https://arxiv.org/abs/2412.11834
7
  #
8
  # Licensed under the Apache License, Version 2.0 (the "License");
9
  # you may not use this file except in compliance with the License.
@@ -16,24 +21,19 @@
16
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17
  # See the License for the specific language governing permissions and
18
  # limitations under the License.
19
- """PyTorch Doge model."""
20
 
21
  import math
22
  from typing import Callable, List, Optional, Tuple, Union
23
 
24
  import torch
25
  import torch.nn.functional as F
26
- import torch.utils.checkpoint
27
  from torch import nn
28
 
29
  from transformers.activations import ACT2FN
30
  from transformers.cache_utils import Cache, DynamicCache, StaticCache
31
  from transformers.generation import GenerationMixin
32
- from transformers.modeling_outputs import (
33
- BaseModelOutputWithPast,
34
- CausalLMOutputWithPast,
35
- SequenceClassifierOutputWithPast,
36
- )
37
  from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS
38
  from transformers.modeling_utils import PreTrainedModel
39
  from transformers.processing_utils import Unpack
@@ -41,30 +41,24 @@ from transformers.utils import (
41
  LossKwargs,
42
  add_start_docstrings,
43
  add_start_docstrings_to_model_forward,
44
- is_torch_greater_or_equal,
45
  logging,
46
  replace_return_docstrings,
47
  )
48
  from .configuration_doge import DogeConfig
49
 
50
- try:
51
- from einx import add as einx_add
52
- except ImportError:
53
- einx_add = None
54
-
55
- if is_torch_greater_or_equal("2.5"):
56
  from torch.nn.attention.flex_attention import flex_attention
57
 
58
-
59
  logger = logging.get_logger(__name__)
60
 
61
  _CONFIG_FOR_DOC = "DogeConfig"
62
 
63
 
64
- class RMSNorm(nn.Module):
65
  def __init__(self, hidden_size, eps=1e-6):
66
  """
67
- RMSNorm is equivalent to T5LayerNorm
68
  """
69
  super().__init__()
70
  self.weight = nn.Parameter(torch.ones(hidden_size))
@@ -81,7 +75,7 @@ class RMSNorm(nn.Module):
81
  return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
82
 
83
 
84
- class Residual(nn.Module):
85
  def __init__(self, hidden_size):
86
  super().__init__()
87
  self.weight = nn.Parameter(torch.ones(hidden_size))
@@ -93,23 +87,21 @@ class Residual(nn.Module):
93
  return f"{tuple(self.weight.shape)}"
94
 
95
 
96
- class RotaryEmbedding(nn.Module):
97
- def __init__(self, config: Optional[DogeConfig] = None):
98
  super().__init__()
99
- self.rope_kwargs = {}
100
-
101
- if config.rope_scaling is not None:
102
  self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
103
  else:
104
  self.rope_type = "default"
105
  self.max_seq_len_cached = config.max_position_embeddings
106
  self.original_max_seq_len = config.max_position_embeddings
107
- self.base = config.rope_theta
108
 
109
  self.config = config
110
  self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
111
 
112
- inv_freq, self.attention_scaling = self.rope_init_fn(self.config, **self.rope_kwargs)
113
  self.register_buffer("inv_freq", inv_freq, persistent=False)
114
  self.original_inv_freq = self.inv_freq
115
 
@@ -121,13 +113,14 @@ class RotaryEmbedding(nn.Module):
121
  """
122
  seq_len = torch.max(position_ids) + 1
123
  if seq_len > self.max_seq_len_cached: # growth
124
- inv_freq, self.attention_scaling = self.rope_init_fn(
125
- self.config, device, seq_len=seq_len, **self.rope_kwargs
126
- )
127
  self.register_buffer("inv_freq", inv_freq, persistent=False) # TODO joao: may break with compilation
128
  self.max_seq_len_cached = seq_len
129
 
130
  if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len: # reset
 
 
 
131
  self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
132
  self.max_seq_len_cached = self.original_max_seq_len
133
 
@@ -136,7 +129,7 @@ class RotaryEmbedding(nn.Module):
136
  if "dynamic" in self.rope_type:
137
  self._dynamic_frequency_update(position_ids, device=x.device)
138
 
139
- # core RoPE block
140
  inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
141
  position_ids_expanded = position_ids[:, None, :].float()
142
  # Force float32 (see https://github.com/huggingface/transformers/pull/29285)
@@ -156,15 +149,13 @@ class RotaryEmbedding(nn.Module):
156
 
157
 
158
  def rotate_half(x):
159
- """
160
- Rotates half the hidden dims of the input.
161
- """
162
  x1 = x[..., : x.shape[-1] // 2]
163
  x2 = x[..., x.shape[-1] // 2 :]
164
  return torch.cat((-x2, x1), dim=-1)
165
 
166
 
167
- def apply_QK_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
168
  """Applies Rotary Position Embedding to the query and key tensors.
169
 
170
  Args:
@@ -176,10 +167,11 @@ def apply_QK_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
176
  Deprecated and unused.
177
  unsqueeze_dim (`int`, *optional*, defaults to 1):
178
  The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
179
- sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k.
180
- For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim].
181
- Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k.
182
- Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
 
183
  Returns:
184
  `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
185
  """
@@ -192,8 +184,8 @@ def apply_QK_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
192
 
193
  def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
194
  """
195
- This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep).
196
- The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
197
  """
198
  batch, num_key_value_heads, slen, head_dim = hidden_states.shape
199
  if n_rep == 1:
@@ -202,6 +194,148 @@ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
202
  return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
203
 
204
205
  class DogeDynamicMaskAttention(nn.Module):
206
  """Dynamic Mask Attention from 'Wonderful Matrices' paper."""
207
 
@@ -209,48 +343,28 @@ class DogeDynamicMaskAttention(nn.Module):
209
  super().__init__()
210
  self.config = config
211
  self.layer_idx = layer_idx
212
- self.head_dim = config.hidden_size // config.num_attention_heads
213
  self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
214
- self.scaling = self.head_dim ** -0.5
215
  self.attention_dropout = config.attention_dropout
216
  self.dynamic_mask_ratio = config.dynamic_mask_ratio
217
- self.is_causal = config.is_causal
218
 
219
- self.ALL_ATTENTION_FUNCTIONS = {
220
- "eager": self.eager_attention_forward,
221
- "flex_attention": self.flex_attention_forward,
222
- "sdpa": self.sdpa_attention_forward,
223
- }
224
-
225
- # Q K V O projections
226
  self.q_proj = nn.Linear(
227
- config.hidden_size,
228
- config.num_attention_heads * self.head_dim,
229
- bias=config.hidden_bias
230
  )
231
  self.k_proj = nn.Linear(
232
- config.hidden_size,
233
- config.num_key_value_heads * self.head_dim,
234
- bias=config.hidden_bias
235
  )
236
  self.v_proj = nn.Linear(
237
- config.hidden_size,
238
- config.num_key_value_heads * self.head_dim,
239
- bias=config.hidden_bias
240
- )
241
- # dynamic mask for the QK^T attention score matrix
242
- self.A = nn.Parameter(
243
- torch.zeros(config.num_attention_heads)
244
  )
 
 
245
  self.dt_proj = nn.Linear(
246
- config.num_key_value_heads * self.head_dim,
247
- config.num_attention_heads,
248
- bias=config.hidden_bias
249
  )
250
  self.o_proj = nn.Linear(
251
- config.num_attention_heads * self.head_dim,
252
- config.hidden_size,
253
- bias=config.hidden_bias
254
  )
255
 
256
  def forward(
@@ -261,7 +375,7 @@ class DogeDynamicMaskAttention(nn.Module):
261
  past_key_value: Optional[Cache] = None,
262
  cache_position: Optional[torch.LongTensor] = None,
263
  **kwargs,
264
- ) -> Tuple[torch.Tensor, Optional[Cache]]:
265
  input_shape = hidden_states.shape[:-1]
266
  hidden_shape = (*input_shape, -1, self.head_dim)
267
 
@@ -270,21 +384,18 @@ class DogeDynamicMaskAttention(nn.Module):
270
  value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
271
 
272
  cos, sin = position_embeddings
273
- query_states, key_states = apply_QK_rotary_pos_emb(query_states, key_states, cos, sin)
274
 
275
  if past_key_value is not None:
276
  # sin and cos are specific to RoPE models; cache_position needed for the static cache
277
  cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
278
  key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
279
 
280
-
281
- dynamic_mask = None
282
- if self.is_causal is False:
283
- # calculate dynamic mask from value_states
284
- # NOTE: If these weights are not trained in causal mode, a mask of all ones will be returned, which will not affect the training results of causal mode
285
- # TODO: The main reason for setting causal mode is that the Flex Attention kernel does not yet support score_mod functions with learnable parameters. However, we can continue training from the causal checkpoint later.
286
- dt_states = self.dt_proj(value_states.transpose(1, 2).reshape(value_states.shape[0], value_states.shape[-2], -1))
287
- dynamic_mask = torch.exp(self.A * F.softplus(dt_states)).transpose(-1, -2)
288
  attn_mask = self.prepare_dynamic_mask(
289
  hidden_states=hidden_states,
290
  dynamic_mask=dynamic_mask,
@@ -292,11 +403,18 @@ class DogeDynamicMaskAttention(nn.Module):
292
  attention_mask=attention_mask,
293
  )
294
 
295
- attention_interface: Callable = self.eager_attention_forward
296
  if self.config._attn_implementation != "eager":
297
- attention_interface = self.ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
298
-
299
- attn_output = attention_interface(
300
  query_states,
301
  key_states,
302
  value_states,
@@ -308,7 +426,7 @@ class DogeDynamicMaskAttention(nn.Module):
308
 
309
  attn_output = attn_output.reshape(*input_shape, -1).contiguous()
310
  attn_output = self.o_proj(attn_output)
311
- return attn_output
312
 
313
  def prepare_dynamic_mask(
314
  self,
@@ -341,113 +459,9 @@ class DogeDynamicMaskAttention(nn.Module):
341
  attn_mask = attention_mask
342
 
343
  return attn_mask
344
-
345
- def eager_attention_forward(
346
- self,
347
- query: torch.Tensor,
348
- key: torch.Tensor,
349
- value: torch.Tensor,
350
- attention_mask: Optional[torch.Tensor],
351
- scaling: float,
352
- dropout: float = 0.0,
353
- **kwargs,
354
- ) -> torch.Tensor:
355
- key_states = repeat_kv(key, self.num_key_value_groups)
356
- value_states = repeat_kv(value, self.num_key_value_groups)
357
-
358
- # compute attention scores matrix
359
- attn_weights = torch.matmul(query, key_states.transpose(-1, -2)) * scaling
360
- if attention_mask is not None:
361
- causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
362
- attn_weights = attn_weights + causal_mask
363
-
364
- # upcast attention scores to fp32
365
- attn_weights = F.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
366
- attn_weights = F.dropout(attn_weights, p=dropout, training=self.training)
367
-
368
- # apply attention scores to value states
369
- attn_output = torch.matmul(attn_weights, value_states)
370
- attn_output = attn_output.transpose(1, 2).contiguous()
371
- return attn_output
372
-
373
- def sdpa_attention_forward(
374
- self,
375
- query: torch.Tensor,
376
- key: torch.Tensor,
377
- value: torch.Tensor,
378
- attention_mask: Optional[torch.Tensor],
379
- scaling: float,
380
- dropout: float = 0.0,
381
- **kwargs,
382
- ) -> torch.Tensor:
383
- key = repeat_kv(key, self.num_key_value_groups)
384
- value = repeat_kv(value, self.num_key_value_groups)
385
-
386
- causal_mask = attention_mask
387
- if attention_mask is not None:
388
- causal_mask = causal_mask[:, :, :, : key.shape[-2]]
389
-
390
- # SDPA with memory-efficient backend is bugged with non-contiguous inputs and custom attn_mask for some torch versions
391
- # Reference: https://github.com/pytorch/pytorch/issues/112577.
392
- query = query.contiguous()
393
- key = key.contiguous()
394
- value = value.contiguous()
395
-
396
- # NOTE: As of pytorch 2.5.1, cuDNN's SDPA backward pass is still incorrect, so we disable cuDNN SDPA (see https://github.com/pytorch/pytorch/issues/138581)
397
- torch.backends.cuda.enable_cudnn_sdp(False)
398
- attn_output = F.scaled_dot_product_attention(
399
- query,
400
- key,
401
- value,
402
- attn_mask=causal_mask,
403
- dropout_p=dropout,
404
- scale=scaling,
405
- # enable_gqa=True,
406
- )
407
- attn_output = attn_output.transpose(1, 2).contiguous()
408
- return attn_output
409
-
410
- def flex_attention_forward(
411
- self,
412
- query: torch.Tensor,
413
- key: torch.Tensor,
414
- value: torch.Tensor,
415
- attention_mask: Optional[torch.Tensor],
416
- scaling: float,
417
- dropout: float = 0.0,
418
- **kwargs,
419
- ) -> torch.Tensor:
420
- causal_mask = attention_mask
421
- if attention_mask is not None:
422
- causal_mask = causal_mask[:, :, :, : key.shape[-2]]
423
-
424
- # TODO: flex_attention: As of pytorch 2.5.1, captured buffers that require grad are not yet supported.
425
- # NOTE: So we only use flex_attention in inference mode.
426
-
427
- def causal_mod(score, batch, head, q_idx, kv_idx):
428
- score = score + causal_mask[batch][0][q_idx][kv_idx]
429
- return score
430
-
431
- def dynamic_mod(score, batch, head, q_idx, kv_idx):
432
- score = score + causal_mask[batch][head][q_idx][kv_idx]
433
- return score
434
-
435
- mask_mod = causal_mod if self.is_causal else dynamic_mod
436
-
437
- attn_output = flex_attention(
438
- query,
439
- key,
440
- value,
441
- score_mod=mask_mod,
442
- scale=scaling,
443
- enable_gqa=True,
444
- )
445
- attn_output = attn_output.transpose(1, 2).contiguous()
446
- return attn_output
447
 
448
 
449
  class DogeMLP(nn.Module):
450
-
451
  def __init__(self, config: DogeConfig):
452
  super().__init__()
453
  self.hidden_dim = config.hidden_size
@@ -482,11 +496,11 @@ class DogeCDMoE(DogeMLP):
482
  self.num_keys = int(math.sqrt(self.num_cdmoe_experts))
483
 
484
  # queries and keys for retrieval experts
485
- self.queries = nn.Linear(self.hidden_dim, self.num_cdmoe_heads * self.expert_retrieval_dim, bias=False)
486
- self.keys = nn.Parameter(torch.zeros(self.num_cdmoe_heads, self.num_keys, 2, self.expert_retrieval_dim // 2))
487
 
488
  # experts
489
- self.down_embed = nn.Embedding(self.num_cdmoe_experts, self.hidden_dim)
490
  self.up_embed = nn.Embedding(self.num_cdmoe_experts, self.hidden_dim)
491
 
492
  def forward(
@@ -496,30 +510,28 @@ class DogeCDMoE(DogeMLP):
496
  ) -> torch.Tensor:
497
  bsz, seq_len, _ = hidden_states.shape
498
 
499
- # get similarity with queries and keys
500
- queries = self.queries(hidden_states)
501
- queries = queries.view(bsz, seq_len, 2, self.num_cdmoe_heads, -1).permute(2, 0, 1, 3, 4)
502
- sim = torch.einsum("p b t h n, h k p n -> p b t h k", queries, self.keys)
503
-
504
- # get experts with the highest similarity
505
- (scores_x, scores_y), (indices_x, indices_y) = sim.topk(self.num_cdmoe_experts_per_head, dim=-1)
506
- if einx_add is not None:
507
- all_scores = einx_add("... i, ... j -> ... (i j)", scores_x, scores_y)
508
- all_indices = einx_add("... i, ... j -> ... (i j)", indices_x * self.num_keys, indices_y)
509
- else:
510
- all_scores = scores_x.unsqueeze(-1) + scores_y.unsqueeze(-2)
511
- all_scores = all_scores.view(*scores_x.shape[:-1], -1)
512
- all_indices = (indices_x.unsqueeze(-1) * self.num_keys) + indices_y.unsqueeze(-2)
513
- all_indices = all_indices.view(*indices_x.shape[:-1], -1)
514
  scores, pk_indices = all_scores.topk(self.num_cdmoe_experts_per_head, dim=-1)
515
  indices = all_indices.gather(-1, pk_indices)
516
  down_embed = self.down_embed(indices)
517
  up_embed = self.up_embed(indices)
518
 
519
  # mix experts states with cross domain states
520
- experts_weights = torch.einsum("b t d, b t h k d -> b t h k", hidden_states, down_embed)
521
  experts_weights = self.act_fn(experts_weights) * scores.softmax(dim=-1)
522
- experts_states = torch.einsum("b t h k, b t h k d -> b t d", experts_weights, up_embed)
523
  hidden_states = self.down_proj(self.act_fn(self.gate_proj(hidden_states)) * self.up_proj(hidden_states))
524
  hidden_states = hidden_states + experts_states
525
  return hidden_states
@@ -530,13 +542,13 @@ class DogeDecoderLayer(nn.Module):
530
  super().__init__()
531
  self.hidden_dropout = config.hidden_dropout
532
 
533
- self.pre_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
534
  self.self_attn = DogeDynamicMaskAttention(config=config, layer_idx=layer_idx)
535
- self.pre_residual = Residual(config.hidden_size)
536
 
537
- self.post_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
538
- self.feed_forward = DogeMLP(config) if config.is_moe == False else DogeCDMoE(config)
539
- self.post_residual = Residual(config.hidden_size)
540
 
541
  def forward(
542
  self,
@@ -550,15 +562,16 @@ class DogeDecoderLayer(nn.Module):
550
  position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC
551
  **kwargs,
552
  ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
553
-
554
  # sequence transformation
555
  residual = hidden_states
556
  hidden_states = self.pre_layernorm(hidden_states)
557
- hidden_states = self.self_attn(
558
  hidden_states=hidden_states,
559
  attention_mask=attention_mask,
560
  position_ids=position_ids,
561
  past_key_value=past_key_value,
 
 
562
  cache_position=cache_position,
563
  position_embeddings=position_embeddings,
564
  **kwargs,
@@ -596,6 +609,8 @@ DOGE_START_DOCSTRING = r"""
596
  load the weights associated with the model, only the configuration. Check out the
597
  [`~PreTrainedModel.from_pretrained`] method to load the model weights.
598
  """
 
 
599
  @add_start_docstrings(
600
  "The bare Doge Model outputting raw hidden-states without any specific head on top.",
601
  DOGE_START_DOCSTRING,
@@ -607,7 +622,7 @@ class DogePreTrainedModel(PreTrainedModel):
607
  _no_split_modules = ["DogeDecoderLayer"]
608
  _skip_keys_device_placement = ["past_key_values"]
609
  _supports_sdpa = True
610
- _supports_flex_attn = True
611
  _supports_cache_class = True
612
  _supports_quantized_cache = True
613
  _supports_static_cache = True
@@ -718,11 +733,11 @@ class DogeModel(DogePreTrainedModel):
718
  self.vocab_size = config.vocab_size
719
 
720
  self.word_embed = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
721
- self.rotary_emb = RotaryEmbedding(config)
722
  self.layers = nn.ModuleList(
723
  [DogeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
724
  )
725
- self.final_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
726
  self.gradient_checkpointing = False
727
 
728
  # Initialize weights and apply final processing
@@ -849,9 +864,27 @@ class DogeModel(DogePreTrainedModel):
849
  past_key_values: Cache,
850
  output_attentions: bool,
851
  ):
852
  past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
853
  using_static_cache = isinstance(past_key_values, StaticCache)
854
 
855
  dtype, device = input_tensor.dtype, input_tensor.device
856
  sequence_length = input_tensor.shape[1]
857
  if using_static_cache:
@@ -863,9 +896,9 @@ class DogeModel(DogePreTrainedModel):
863
  else past_seen_tokens + sequence_length + 1
864
  )
865
 
866
- # in case the provided `attention` mask is 2D, we generate a causal mask here (4D).
867
  causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
868
- attention_mask=attention_mask,
869
  sequence_length=sequence_length,
870
  target_length=target_length,
871
  dtype=dtype,
@@ -874,17 +907,29 @@ class DogeModel(DogePreTrainedModel):
874
  batch_size=input_tensor.shape[0],
875
  )
876
 
 
877
  return causal_mask
878
-
879
  @staticmethod
880
  def _prepare_4d_causal_attention_mask_with_cache_position(
881
- attention_mask: torch.Tensor = None,
882
- sequence_length: int = None,
883
- target_length: int = None,
884
- dtype: torch.dtype = None,
885
- device: torch.device = None,
886
- cache_position: torch.Tensor = None,
887
- batch_size: int = None,
888
  **kwargs,
889
  ):
890
  """
@@ -915,8 +960,7 @@ class DogeModel(DogePreTrainedModel):
915
  else:
916
  min_dtype = torch.finfo(dtype).min
917
  causal_mask = torch.full(
918
- (sequence_length, target_length),
919
- fill_value=min_dtype, dtype=dtype, device=device,
920
  )
921
  if sequence_length != 1:
922
  causal_mask = torch.triu(causal_mask, diagonal=1)
@@ -934,9 +978,6 @@ class DogeModel(DogePreTrainedModel):
934
  return causal_mask
935
 
936
 
937
- class KwargsForCausalLM(LossKwargs): ...
938
-
939
-
940
  class DogeForCausalLM(DogePreTrainedModel, GenerationMixin):
941
  _tied_weights_keys = ["lm_head.weight"]
942
  _tp_plan = {"lm_head": "colwise_rep"}
@@ -962,7 +1003,7 @@ class DogeForCausalLM(DogePreTrainedModel, GenerationMixin):
962
 
963
  def set_output_embeddings(self, new_embeddings):
964
  self.lm_head = new_embeddings
965
-
966
  def get_decoder(self):
967
  return self.model
968
 
@@ -984,8 +1025,8 @@ class DogeForCausalLM(DogePreTrainedModel, GenerationMixin):
984
  output_hidden_states: Optional[bool] = None,
985
  return_dict: Optional[bool] = None,
986
  cache_position: Optional[torch.LongTensor] = None,
987
- num_logits_to_keep: int = 0,
988
- **kwargs: Unpack[KwargsForCausalLM],
989
  ) -> Union[Tuple, CausalLMOutputWithPast]:
990
  r"""
991
  Args:
@@ -994,10 +1035,12 @@ class DogeForCausalLM(DogePreTrainedModel, GenerationMixin):
994
  config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
995
  (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
996
 
997
- num_logits_to_keep (`int`, *optional*):
998
- Calculate logits for the last `num_logits_to_keep` tokens. If `0`, calculate logits for all
999
  `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
1000
  token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
 
 
1001
 
1002
  Returns:
1003
 
@@ -1006,8 +1049,8 @@ class DogeForCausalLM(DogePreTrainedModel, GenerationMixin):
1006
  ```python
1007
  >>> from transformers import AutoTokenizer, AutoModelForCausalLM
1008
 
1009
- >>> model = AutoModelForCausalLM.from_pretrained("JingzeShi/Doge-20M-Instruct")
1010
- >>> tokenizer = AutoTokenizer.from_pretrained("JingzeShi/Doge-20M-Instruct")
1011
 
1012
  >>> prompt = "Hey, are you conscious? Can you talk to me?"
1013
  >>> inputs = tokenizer(prompt, return_tensors="pt")
@@ -1039,9 +1082,9 @@ class DogeForCausalLM(DogePreTrainedModel, GenerationMixin):
1039
  )
1040
 
1041
  hidden_states = outputs[0]
1042
-
1043
  # only compute necessary logits, and do not upcast them to float if we are not computing the loss
1044
- logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :])
 
1045
 
1046
  loss = None
1047
  if labels is not None:
@@ -1060,111 +1103,32 @@ class DogeForCausalLM(DogePreTrainedModel, GenerationMixin):
1060
  )
1061
 
1062
 
1063
- class DogePatchEmbedding(nn.Module):
1064
- """
1065
- This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` of shape `(batch_size, seq_len, hidden_size)` to be consumed by a Transformer.
1066
- """
1067
-
1068
- def __init__(self, config: DogeConfig):
1069
- super().__init__()
1070
-
1071
- self.num_channels = config.num_channels
1072
- self.patch_size = config.patch_size
1073
- self.hidden_dim = config.hidden_size
1074
-
1075
- self.sequence_proj = nn.Conv2d(self.num_channels, self.hidden_dim, kernel_size=self.patch_size, stride=self.patch_size)
1076
- self.state_proj = nn.Linear(self.hidden_dim, self.hidden_dim, bias=config.hidden_bias)
1077
-
1078
- def forward(
1079
- self,
1080
- pixel_values: torch.Tensor,
1081
- ) -> torch.Tensor:
1082
- image_embedding = self.sequence_proj(pixel_values).flatten(2).transpose(1, 2)
1083
- image_embedding = self.state_proj(image_embedding)
1084
- return image_embedding
1085
-
1086
-
1087
- class DogeForCausalVLM(DogeForCausalLM):
1088
- _tied_weights_keys = ["lm_head.weight"]
1089
-
1090
- def __init__(self, config: DogeConfig):
1091
- super().__init__(config)
1092
- self.config = config
1093
- self.pixel_embed = DogePatchEmbedding(config)
1094
-
1095
- # Initialize weights and apply final processing
1096
- self.post_init()
1097
-
1098
- def forward(
1099
- self,
1100
- input_ids: torch.LongTensor = None,
1101
- pixel_values: torch.FloatTensor = None,
1102
- attention_mask: Optional[torch.Tensor] = None,
1103
- position_ids: Optional[torch.LongTensor] = None,
1104
- past_key_values: Optional[torch.Tensor] = None,
1105
- inputs_embeds: Optional[torch.FloatTensor] = None,
1106
- labels: Optional[torch.LongTensor] = None,
1107
- use_cache: Optional[bool] = None,
1108
- output_attentions: Optional[bool] = None,
1109
- output_hidden_states: Optional[bool] = None,
1110
- return_dict: Optional[bool] = None,
1111
- cache_position: Optional[torch.LongTensor] = None,
1112
- num_logits_to_keep: int = 0,
1113
- **loss_kwargs,
1114
- ) -> Union[Tuple, CausalLMOutputWithPast]:
1115
- # TODO: @wubingheng111: refer to Llava for implementating the forward method
1116
- ...
1117
-
1118
- def prepare_inputs_for_generation(
1119
- self,
1120
- input_ids=None,
1121
- pixel_values=None,
1122
- past_key_values=None,
1123
- input_embeds=None,
1124
- attention_mask=None,
1125
- cache_position=None,
1126
- num_logits_to_keep=None,
1127
- **kwargs,
1128
- ):
1129
- model_inputs = self.model.prepare_inputs_for_generation(
1130
- input_ids,
1131
- past_key_values=past_key_values,
1132
- inputs_embeds=input_embeds,
1133
- attention_mask=attention_mask,
1134
- cache_position=cache_position,
1135
- num_logits_to_keep=num_logits_to_keep,
1136
- **kwargs,
1137
- )
1138
-
1139
- if cache_position[0] == 0:
1140
- model_inputs["pixel_values"] = pixel_values
1141
-
1142
- return model_inputs
1143
-
1144
-
1145
  @add_start_docstrings(
1146
  """
1147
  The Doge Model transformer with a sequence classification head on top (linear layer).
1148
 
1149
- [`DogeForSequenceClassification`] uses the last token in order to do the classification, as other causal models (e.g. GPT-2) do.
 
1150
 
1151
- Since it does classification on the last token, it requires to know the position of the last token.
1152
- If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row.
1153
- If no `pad_token_id` is defined, it simply takes the last value in each row of the batch.
1154
- Since it cannot guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in each row of the batch).
1155
- """
 
 
1156
  )
1157
  class DogeForSequenceClassification(DogePreTrainedModel):
1158
  def __init__(self, config: DogeConfig):
1159
  super().__init__(config)
1160
- self.config = config
1161
  self.num_labels = config.num_labels
1162
 
1163
  self.model = DogeModel(config)
1164
- self.classifier = nn.Linear(config.hidden_size, self.num_labels, bias=False)
 
1165
 
1166
  # Initialize weights and apply final processing
1167
- self.init_weights()
1168
 
1169
  def get_input_embeddings(self):
1170
  return self.model.word_embed
@@ -1188,14 +1152,14 @@ class DogeForSequenceClassification(DogePreTrainedModel):
1188
  ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1189
  r"""
1190
  labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1191
- Labels for computing the sequence classification/regression loss.
1192
- Indices should be in `[0, ..., config.num_labels - 1]`.
1193
- If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1194
  """
1195
  return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1196
 
1197
- outputs = self.model(
1198
- input_ids=input_ids,
1199
  attention_mask=attention_mask,
1200
  position_ids=position_ids,
1201
  past_key_values=past_key_values,
@@ -1205,8 +1169,8 @@ class DogeForSequenceClassification(DogePreTrainedModel):
1205
  output_hidden_states=output_hidden_states,
1206
  return_dict=return_dict,
1207
  )
1208
- hidden_states = outputs[0]
1209
- logits = self.classifier(hidden_states)
1210
 
1211
  if input_ids is not None:
1212
  batch_size = input_ids.shape[0]
@@ -1216,35 +1180,36 @@ class DogeForSequenceClassification(DogePreTrainedModel):
1216
  if self.config.pad_token_id is None and batch_size != 1:
1217
  raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
1218
  if self.config.pad_token_id is None:
1219
1220
  else:
1221
- if input_ids is not None:
1222
- # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
1223
- sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1224
- sequence_lengths = sequence_lengths % input_ids.shape[-1]
1225
- sequence_lengths = sequence_lengths.to(logits.device)
1226
- else:
1227
- sequence_lengths = -1
1228
 
1229
- pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1230
 
1231
  loss = None
1232
  if labels is not None:
1233
- loss = self.loss_function(
1234
- logits=logits,
1235
- labels=labels,
1236
- pooled_logits=pooled_logits,
1237
- config=self.config,
1238
- )
1239
 
1240
  if not return_dict:
1241
- output = (pooled_logits,) + outputs[1:]
1242
  return ((loss,) + output) if loss is not None else output
1243
 
1244
  return SequenceClassifierOutputWithPast(
1245
  loss=loss,
1246
  logits=pooled_logits,
1247
- past_key_values=outputs.past_key_values,
1248
- hidden_states=outputs.hidden_states,
1249
- attentions=outputs.attentions,
1250
  )
 
 
 
 
1
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
2
+ # This file was automatically generated from src/transformers/models/doge/modular_doge.py.
3
+ # Do NOT edit this file manually as any edits will be overwritten by the generation of
4
+ # the file from the modular. If any change should be done, please apply the change to the
5
+ # modular_doge.py file directly. One of our CI enforces this.
6
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
7
  # coding=utf-8
8
  # Copyright 2024 Jingze Shi and the HuggingFace Inc. team. All rights reserved.
9
  #
10
  # This code is based on the Wonderful Matrices paper implementation.
11
+ # The Doge family of small language models is trained by Jingze Shi.
 
12
  #
13
  # Licensed under the Apache License, Version 2.0 (the "License");
14
  # you may not use this file except in compliance with the License.
 
21
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
22
  # See the License for the specific language governing permissions and
23
  # limitations under the License.
 
24
 
25
  import math
26
  from typing import Callable, List, Optional, Tuple, Union
27
 
28
  import torch
29
  import torch.nn.functional as F
 
30
  from torch import nn
31
 
32
  from transformers.activations import ACT2FN
33
  from transformers.cache_utils import Cache, DynamicCache, StaticCache
34
  from transformers.generation import GenerationMixin
35
+ from transformers.modeling_attn_mask_utils import AttentionMaskConverter
36
+ from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
 
 
 
37
  from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS
38
  from transformers.modeling_utils import PreTrainedModel
39
  from transformers.processing_utils import Unpack
 
41
  LossKwargs,
42
  add_start_docstrings,
43
  add_start_docstrings_to_model_forward,
44
+ is_torch_flex_attn_available,
45
  logging,
46
  replace_return_docstrings,
47
  )
48
  from .configuration_doge import DogeConfig
49
 
50
+ if is_torch_flex_attn_available():
 
 
 
 
 
51
  from torch.nn.attention.flex_attention import flex_attention
52
 
 
53
  logger = logging.get_logger(__name__)
54
 
55
  _CONFIG_FOR_DOC = "DogeConfig"
56
 
57
 
58
+ class DogeRMSNorm(nn.Module):
59
  def __init__(self, hidden_size, eps=1e-6):
60
  """
61
+ DogeRMSNorm is equivalent to T5LayerNorm
62
  """
63
  super().__init__()
64
  self.weight = nn.Parameter(torch.ones(hidden_size))
 
75
  return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
76
 
77
 
78
+ class DogeResidual(nn.Module):
79
  def __init__(self, hidden_size):
80
  super().__init__()
81
  self.weight = nn.Parameter(torch.ones(hidden_size))
 
87
  return f"{tuple(self.weight.shape)}"
88
 
89
 
90
+ class DogeRotaryEmbedding(nn.Module):
91
+ def __init__(self, config: DogeConfig, device=None):
92
  super().__init__()
93
+ # BC: "rope_type" was originally "type"
94
+ if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
 
95
  self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
96
  else:
97
  self.rope_type = "default"
98
  self.max_seq_len_cached = config.max_position_embeddings
99
  self.original_max_seq_len = config.max_position_embeddings
 
100
 
101
  self.config = config
102
  self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
103
 
104
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
105
  self.register_buffer("inv_freq", inv_freq, persistent=False)
106
  self.original_inv_freq = self.inv_freq
107
 
 
113
  """
114
  seq_len = torch.max(position_ids) + 1
115
  if seq_len > self.max_seq_len_cached: # growth
116
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, seq_len=seq_len)
 
 
117
  self.register_buffer("inv_freq", inv_freq, persistent=False) # TODO joao: may break with compilation
118
  self.max_seq_len_cached = seq_len
119
 
120
  if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len: # reset
121
+ # This .to() is needed if the model has been moved to a device after being initialized (because
122
+ # the buffer is automatically moved, but not the original copy)
123
+ self.original_inv_freq = self.original_inv_freq.to(device)
124
  self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
125
  self.max_seq_len_cached = self.original_max_seq_len
126
 
 
129
  if "dynamic" in self.rope_type:
130
  self._dynamic_frequency_update(position_ids, device=x.device)
131
 
132
+ # Core RoPE block
133
  inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
134
  position_ids_expanded = position_ids[:, None, :].float()
135
  # Force float32 (see https://github.com/huggingface/transformers/pull/29285)
 
149
 
150
 
151
  def rotate_half(x):
152
+ """Rotates half the hidden dims of the input."""
 
 
153
  x1 = x[..., : x.shape[-1] // 2]
154
  x2 = x[..., x.shape[-1] // 2 :]
155
  return torch.cat((-x2, x1), dim=-1)
156
 
157
 
158
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
159
  """Applies Rotary Position Embedding to the query and key tensors.
160
 
161
  Args:
 
167
  Deprecated and unused.
168
  unsqueeze_dim (`int`, *optional*, defaults to 1):
169
  The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
170
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
171
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
172
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
173
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
174
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
175
  Returns:
176
  `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
177
  """
 
184
 
185
  def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
186
  """
187
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
188
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
189
  """
190
  batch, num_key_value_heads, slen, head_dim = hidden_states.shape
191
  if n_rep == 1:
 
194
  return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
195
 
196
 
197
+ def eager_attention_forward(
198
+ module: nn.Module,
199
+ query: torch.Tensor,
200
+ key: torch.Tensor,
201
+ value: torch.Tensor,
202
+ attention_mask: Optional[torch.Tensor],
203
+ scaling: float,
204
+ dropout: float = 0.0,
205
+ **kwargs,
206
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
207
+ key_states = repeat_kv(key, module.num_key_value_groups)
208
+ value_states = repeat_kv(value, module.num_key_value_groups)
209
+
210
+ attn_weights = torch.matmul(query, key_states.transpose(-1, -2)) * scaling
211
+ if attention_mask is not None:
212
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
213
+ attn_weights = attn_weights + causal_mask
214
+
215
+ attn_weights = F.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
216
+ attn_weights = F.dropout(attn_weights, p=dropout, training=module.training)
217
+ attn_output = torch.matmul(attn_weights, value_states)
218
+ attn_output = attn_output.transpose(1, 2).contiguous()
219
+
220
+ return attn_output, attn_weights
221
+
222
+
223
+ def sdpa_attention_forward(
224
+ module: nn.Module,
225
+ query: torch.Tensor,
226
+ key: torch.Tensor,
227
+ value: torch.Tensor,
228
+ attention_mask: Optional[torch.Tensor],
229
+ dropout: float = 0.0,
230
+ scaling: Optional[float] = None,
231
+ is_causal: Optional[bool] = None,
232
+ **kwargs,
233
+ ) -> Tuple[torch.Tensor, None]:
234
+ key = repeat_kv(key, module.num_key_value_groups)
235
+ value = repeat_kv(value, module.num_key_value_groups)
236
+
237
+ causal_mask = attention_mask
238
+ if attention_mask is not None:
239
+ causal_mask = causal_mask[:, :, :, : key.shape[-2]]
240
+
241
+ # SDPA with memory-efficient backend is bugged with non-contiguous inputs and custom attn_mask for some torch versions
242
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
243
+ query = query.contiguous()
244
+ key = key.contiguous()
245
+ value = value.contiguous()
246
+
247
+ # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
248
+ # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
249
+ if is_causal is None:
250
+ is_causal = causal_mask is None and query.shape[2] > 1
251
+
252
+ # Shapes (e.g. query.shape[2]) are tensors during jit tracing, resulting in `is_causal` being a tensor.
253
+ # We convert it to a bool for the SDPA kernel that only accepts bools.
254
+ if torch.jit.is_tracing() and isinstance(is_causal, torch.Tensor):
255
+ is_causal = is_causal.item()
256
+
257
+ # NOTE: As of pytorch 2.5.1, SDPA backward pass of cuDNN is still incorrect, so we disable cuDNN SDPA (see https://github.com/pytorch/pytorch/issues/138581)
258
+ torch.backends.cuda.enable_cudnn_sdp(False)
259
+ attn_output = F.scaled_dot_product_attention(
260
+ query=query,
261
+ key=key,
262
+ value=value,
263
+ attn_mask=causal_mask,
264
+ dropout_p=dropout,
265
+ scale=scaling,
266
+ is_causal=is_causal,
267
+ )
268
+ attn_output = attn_output.transpose(1, 2).contiguous()
269
+
270
+ return attn_output, None
271
+
272
+
273
+ def flex_attention_forward(
274
+ module: nn.Module,
275
+ query: torch.Tensor,
276
+ key: torch.Tensor,
277
+ value: torch.Tensor,
278
+ attention_mask: Optional[torch.Tensor],
279
+ scaling: Optional[float] = None,
280
+ is_causal: Optional[bool] = None,
281
+ softcap: Optional[float] = None,
282
+ head_mask: Optional[torch.Tensor] = None,
283
+ **kwargs,
284
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
285
+ causal_mask = attention_mask
286
+ if attention_mask is not None:
287
+ causal_mask = causal_mask[:, :, :, : key.shape[-2]]
288
+
289
+ if is_causal is None:
290
+ is_causal = causal_mask is None and query.shape[2] > 1
291
+
292
+ def causal_mod(score, batch, head, q_idx, kv_idx):
293
+ if softcap is not None:
294
+ score = softcap * torch.tanh(score / softcap)
295
+ if causal_mask is not None:
296
+ score = score + causal_mask[batch][0][q_idx][kv_idx]
297
+ if head_mask is not None:
298
+ score = score + head_mask[batch][head][0][0]
299
+ return score
300
+
301
+ def dynamic_mod(score, batch, head, q_idx, kv_idx):
302
+ if softcap is not None:
303
+ score = softcap * torch.tanh(score / softcap)
304
+ if causal_mask is not None:
305
+ score = score + causal_mask[batch][head][q_idx][kv_idx]
306
+ if head_mask is not None:
307
+ score = score + head_mask[batch][head][0][0]
308
+ return score
309
+
310
+ # TODO: flex_attention: As of pytorch 2.5.1, captured buffers that require grad are not yet supported.
311
+ # NOTE: So we only use flex_attention in inference mode.
312
+ mask_mod = causal_mod if is_causal or module.training else dynamic_mod
313
+
314
+ attn_output, attention_weights = flex_attention(
315
+ query=query,
316
+ key=key,
317
+ value=value,
318
+ score_mod=mask_mod,
319
+ enable_gqa=True,
320
+ scale=scaling,
321
+ # Last time checked on PyTorch == 2.5.1: Flex Attention always computes the lse regardless.
322
+ # For simplification, we thus always return it as no additional computations are introduced.
323
+ return_lse=True,
324
+ )
325
+ # lse is returned in float32
326
+ attention_weights = attention_weights.to(value.dtype)
327
+ attn_output = attn_output.transpose(1, 2).contiguous()
328
+
329
+ return attn_output, attention_weights
330
+
331
+
332
+ ALL_ATTENTION_FUNCTIONS = {
333
+ "eager": eager_attention_forward,
334
+ "sdpa": sdpa_attention_forward,
335
+ "flex_attention": flex_attention_forward,
336
+ }
337
+
338
+
339
  class DogeDynamicMaskAttention(nn.Module):
340
  """Dynamic Mask Attention from 'Wonderful Matrices' paper."""
341
 
 
343
  super().__init__()
344
  self.config = config
345
  self.layer_idx = layer_idx
346
+ self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
347
  self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
348
+ self.scaling = self.head_dim**-0.5
349
  self.attention_dropout = config.attention_dropout
350
  self.dynamic_mask_ratio = config.dynamic_mask_ratio
 
351
 
 
 
 
 
 
 
 
352
  self.q_proj = nn.Linear(
353
+ config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.hidden_bias
 
 
354
  )
355
  self.k_proj = nn.Linear(
356
+ config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.hidden_bias
 
 
357
  )
358
  self.v_proj = nn.Linear(
359
+ config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.hidden_bias
 
 
 
 
 
 
360
  )
361
+ # dynamic mask for the QK^T attention weights matrix
362
+ self.A = nn.Parameter(torch.zeros(config.num_attention_heads))
363
  self.dt_proj = nn.Linear(
364
+ config.num_key_value_heads * self.head_dim, config.num_attention_heads, bias=config.hidden_bias
 
 
365
  )
366
  self.o_proj = nn.Linear(
367
+ config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.hidden_bias
 
 
368
  )
369
 
370
  def forward(
 
375
  past_key_value: Optional[Cache] = None,
376
  cache_position: Optional[torch.LongTensor] = None,
377
  **kwargs,
378
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
379
  input_shape = hidden_states.shape[:-1]
380
  hidden_shape = (*input_shape, -1, self.head_dim)
381
 
 
384
  value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
385
 
386
  cos, sin = position_embeddings
387
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
388
 
389
  if past_key_value is not None:
390
  # sin and cos are specific to RoPE models; cache_position needed for the static cache
391
  cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
392
  key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
393
 
394
+ # calculate dynamic mask from value_states
395
+ dt_states = self.dt_proj(
396
+ value_states.transpose(1, 2).reshape(value_states.shape[0], value_states.shape[-2], -1)
397
+ )
398
+ dynamic_mask = torch.exp(self.A * F.softplus(dt_states)).transpose(-1, -2)
 
 
 
399
  attn_mask = self.prepare_dynamic_mask(
400
  hidden_states=hidden_states,
401
  dynamic_mask=dynamic_mask,
 
403
  attention_mask=attention_mask,
404
  )
405
 
406
+ attention_interface: Callable = eager_attention_forward
407
  if self.config._attn_implementation != "eager":
408
+ if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
409
+ logger.warning_once(
410
+ "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
411
+ 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
412
+ )
413
+ else:
414
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
415
+
416
+ attn_output, attn_weights = attention_interface(
417
+ self,
418
  query_states,
419
  key_states,
420
  value_states,
 
426
 
427
  attn_output = attn_output.reshape(*input_shape, -1).contiguous()
428
  attn_output = self.o_proj(attn_output)
429
+ return attn_output, attn_weights
430
 
431
  def prepare_dynamic_mask(
432
  self,
 
459
  attn_mask = attention_mask
460
 
461
  return attn_mask
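A minimal standalone sketch of the dynamic-mask values computed in the attention forward pass above; all sizes and tensors below are hypothetical stand-ins for the module's real projections:

import torch
import torch.nn.functional as F

# Hypothetical sizes: 2 sequences, 8 cached key/value positions, 6 attention heads,
# and a flattened key/value width of num_key_value_heads * head_dim = 384.
batch, kv_len, num_heads, kv_width = 2, 8, 6, 384

A = torch.zeros(num_heads)                       # stand-in for self.A, one coefficient per head
dt_proj = torch.nn.Linear(kv_width, num_heads)   # stand-in for self.dt_proj

value_states = torch.randn(batch, kv_len, kv_width)   # value states already flattened over heads
dt_states = dt_proj(value_states)                      # (batch, kv_len, num_heads)

# exp(A * softplus(dt)) is 1 everywhere while A == 0, so the mask starts out neutral
# and only reweights key positions as A is learned.
dynamic_mask = torch.exp(A * F.softplus(dt_states)).transpose(-1, -2)
print(dynamic_mask.shape)   # torch.Size([2, 6, 8]) -> (batch, num_heads, kv_len)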
 
 
 
462
 
463
 
464
  class DogeMLP(nn.Module):
 
465
  def __init__(self, config: DogeConfig):
466
  super().__init__()
467
  self.hidden_dim = config.hidden_size
 
496
  self.num_keys = int(math.sqrt(self.num_cdmoe_experts))
497
 
498
  # queries and keys for retrieval experts
499
+ self.queries_proj = nn.Linear(self.hidden_dim, self.num_cdmoe_heads * self.expert_retrieval_dim, bias=False)
500
+ self.keys = nn.Parameter(torch.zeros(self.num_cdmoe_heads, self.expert_retrieval_dim, self.num_keys))
501
 
502
  # experts
503
+ self.down_embed = nn.Embedding(self.num_cdmoe_experts, self.hidden_dim)
504
  self.up_embed = nn.Embedding(self.num_cdmoe_experts, self.hidden_dim)
505
 
506
  def forward(
 
510
  ) -> torch.Tensor:
511
  bsz, seq_len, _ = hidden_states.shape
512
 
513
+ # get routing weights with queries and keys
514
+ queries = self.queries_proj(hidden_states)
515
+ queries = queries.view(2, self.num_cdmoe_heads, bsz * seq_len, -1)
516
+ keys = self.keys.view(2, self.num_cdmoe_heads, -1, self.num_keys)
517
+ routing_weights = torch.matmul(queries, keys)
518
+ routing_weights = routing_weights.transpose(-2, -3).view(2, bsz, seq_len, self.num_cdmoe_heads, self.num_keys)
519
+
520
+ # get experts with the highest routing weights
521
+ (scores_x, scores_y), (indices_x, indices_y) = routing_weights.topk(self.num_cdmoe_experts_per_head, dim=-1)
522
+ all_scores = scores_x.unsqueeze(-1) + scores_y.unsqueeze(-2)
523
+ all_scores = all_scores.view(*scores_x.shape[:-1], -1)
524
+ all_indices = (indices_x.unsqueeze(-1) * self.num_keys) + indices_y.unsqueeze(-2)
525
+ all_indices = all_indices.view(*indices_x.shape[:-1], -1)
 
 
526
  scores, pk_indices = all_scores.topk(self.num_cdmoe_experts_per_head, dim=-1)
527
  indices = all_indices.gather(-1, pk_indices)
528
  down_embed = self.down_embed(indices)
529
  up_embed = self.up_embed(indices)
530
 
531
  # mix experts states with cross domain states
532
+ experts_weights = torch.sum(hidden_states[:, :, None, None, :] * down_embed, dim=-1)
533
  experts_weights = self.act_fn(experts_weights) * scores.softmax(dim=-1)
534
+ experts_states = torch.sum(experts_weights[:, :, :, :, None] * up_embed, dim=(-2, -3))
535
  hidden_states = self.down_proj(self.act_fn(self.gate_proj(hidden_states)) * self.up_proj(hidden_states))
536
  hidden_states = hidden_states + experts_states
537
  return hidden_states
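The routing above is a product-key lookup: each half of the query scores `num_keys` sub-keys, and the two top-k lists are composed into k * k candidate experts out of `num_keys**2`. A minimal sketch with toy sizes, dropping the batch, sequence, and head dimensions (all values below are hypothetical):

import torch

num_keys, k = 4, 2                        # 4 sub-keys per half -> 16 composed experts, top-2 per half

scores_x = torch.randn(num_keys)          # similarities against the first half of the keys
scores_y = torch.randn(num_keys)          # similarities against the second half

top_x, idx_x = scores_x.topk(k)
top_y, idx_y = scores_y.topk(k)

# Compose the k x k candidate grid: score(i, j) = top_x[i] + top_y[j],
# expert_id(i, j) = idx_x[i] * num_keys + idx_y[j] (a flat index into num_keys**2 experts).
all_scores = (top_x.unsqueeze(-1) + top_y.unsqueeze(-2)).reshape(-1)
all_indices = (idx_x.unsqueeze(-1) * num_keys + idx_y.unsqueeze(-2)).reshape(-1)

scores, pick = all_scores.topk(k)         # final top-k experts among the k * k candidates
experts = all_indices.gather(-1, pick)
print(experts)                            # expert ids in [0, num_keys**2)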
 
542
  super().__init__()
543
  self.hidden_dropout = config.hidden_dropout
544
 
545
+ self.pre_layernorm = DogeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
546
  self.self_attn = DogeDynamicMaskAttention(config=config, layer_idx=layer_idx)
547
+ self.pre_residual = DogeResidual(config.hidden_size)
548
 
549
+ self.post_layernorm = DogeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
550
+ self.feed_forward = DogeMLP(config) if not config.is_moe else DogeCDMoE(config)
551
+ self.post_residual = DogeResidual(config.hidden_size)
552
 
553
  def forward(
554
  self,
 
562
  position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC
563
  **kwargs,
564
  ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
 
565
  # sequence transformation
566
  residual = hidden_states
567
  hidden_states = self.pre_layernorm(hidden_states)
568
+ hidden_states, self_attn_weights = self.self_attn(
569
  hidden_states=hidden_states,
570
  attention_mask=attention_mask,
571
  position_ids=position_ids,
572
  past_key_value=past_key_value,
573
+ output_attentions=output_attentions,
574
+ use_cache=use_cache,
575
  cache_position=cache_position,
576
  position_embeddings=position_embeddings,
577
  **kwargs,
 
609
  load the weights associated with the model, only the configuration. Check out the
610
  [`~PreTrainedModel.from_pretrained`] method to load the model weights.
611
  """
612
+
613
+
614
  @add_start_docstrings(
615
  "The bare Doge Model outputting raw hidden-states without any specific head on top.",
616
  DOGE_START_DOCSTRING,
 
622
  _no_split_modules = ["DogeDecoderLayer"]
623
  _skip_keys_device_placement = ["past_key_values"]
624
  _supports_sdpa = True
625
+ # _supports_flex_attn = True # TODO: enable this when flex_attention is fully supported
626
  _supports_cache_class = True
627
  _supports_quantized_cache = True
628
  _supports_static_cache = True
 
733
  self.vocab_size = config.vocab_size
734
 
735
  self.word_embed = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
736
+ self.rotary_emb = DogeRotaryEmbedding(config)
737
  self.layers = nn.ModuleList(
738
  [DogeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
739
  )
740
+ self.final_layernorm = DogeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
741
  self.gradient_checkpointing = False
742
 
743
  # Initialize weights and apply final processing
 
864
  past_key_values: Cache,
865
  output_attentions: bool,
866
  ):
867
+ if self.config._attn_implementation == "flash_attention_2":
868
+ if attention_mask is not None and (attention_mask == 0.0).any():
869
+ return attention_mask
870
+ return None
871
+
872
+ # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
873
+ # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
874
+ # to infer the attention mask.
875
  past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
876
  using_static_cache = isinstance(past_key_values, StaticCache)
877
 
878
+ # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
879
+ if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions:
880
+ if AttentionMaskConverter._ignore_causal_mask_sdpa(
881
+ attention_mask,
882
+ inputs_embeds=input_tensor,
883
+ past_key_values_length=past_seen_tokens,
884
+ is_training=self.training,
885
+ ):
886
+ return None
887
+
888
  dtype, device = input_tensor.dtype, input_tensor.device
889
  sequence_length = input_tensor.shape[1]
890
  if using_static_cache:
 
896
  else past_seen_tokens + sequence_length + 1
897
  )
898
 
899
+ # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
900
  causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
901
+ attention_mask,
902
  sequence_length=sequence_length,
903
  target_length=target_length,
904
  dtype=dtype,
 
907
  batch_size=input_tensor.shape[0],
908
  )
909
 
910
+ if (
911
+ self.config._attn_implementation == "sdpa"
912
+ and attention_mask is not None
913
+ and attention_mask.device.type in ["cuda", "xpu"]
914
+ and not output_attentions
915
+ ):
916
+ # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
917
+ # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
918
+ # Details: https://github.com/pytorch/pytorch/issues/110213
919
+ min_dtype = torch.finfo(dtype).min
920
+ causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
921
+
922
  return causal_mask
923
+
924
  @staticmethod
925
  def _prepare_4d_causal_attention_mask_with_cache_position(
926
+ attention_mask: torch.Tensor,
927
+ sequence_length: int,
928
+ target_length: int,
929
+ dtype: torch.dtype,
930
+ device: torch.device,
931
+ cache_position: torch.Tensor,
932
+ batch_size: int,
933
  **kwargs,
934
  ):
935
  """
 
960
  else:
961
  min_dtype = torch.finfo(dtype).min
962
  causal_mask = torch.full(
963
+ (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
 
964
  )
965
  if sequence_length != 1:
966
  causal_mask = torch.triu(causal_mask, diagonal=1)
 
978
  return causal_mask
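For reference, the visible part of this helper produces the familiar additive causal pattern; a toy example with hypothetical sizes follows (only the construction shown above is reproduced, not the elided middle of the helper):

import torch

# Hypothetical toy sizes: 3 query positions over a target (key) length of 5.
sequence_length, target_length = 3, 5
dtype = torch.float32
min_dtype = torch.finfo(dtype).min

causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype)
if sequence_length != 1:
    causal_mask = torch.triu(causal_mask, diagonal=1)

print(causal_mask)
# tensor([[ 0.0000e+00, -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38],
#         [ 0.0000e+00,  0.0000e+00, -3.4028e+38, -3.4028e+38, -3.4028e+38],
#         [ 0.0000e+00,  0.0000e+00,  0.0000e+00, -3.4028e+38, -3.4028e+38]])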
979
 
980
 
 
 
 
981
  class DogeForCausalLM(DogePreTrainedModel, GenerationMixin):
982
  _tied_weights_keys = ["lm_head.weight"]
983
  _tp_plan = {"lm_head": "colwise_rep"}
 
1003
 
1004
  def set_output_embeddings(self, new_embeddings):
1005
  self.lm_head = new_embeddings
1006
+
1007
  def get_decoder(self):
1008
  return self.model
1009
 
 
1025
  output_hidden_states: Optional[bool] = None,
1026
  return_dict: Optional[bool] = None,
1027
  cache_position: Optional[torch.LongTensor] = None,
1028
+ logits_to_keep: Union[int, torch.Tensor] = 0,
1029
+ **kwargs: Unpack[LossKwargs],
1030
  ) -> Union[Tuple, CausalLMOutputWithPast]:
1031
  r"""
1032
  Args:
 
1035
  config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1036
  (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1037
 
1038
+ logits_to_keep (`int`, *optional*):
1039
+ If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all
1040
  `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
1041
  token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
1042
+ If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length dimension.
1043
+ This is useful when using packed tensor format (single dimension for batch and sequence length).
1044
 
1045
  Returns:
1046
 
 
1049
  ```python
1050
  >>> from transformers import AutoTokenizer, AutoModelForCausalLM
1051
 
1052
+ >>> model = AutoModelForCausalLM.from_pretrained("SmallDoge/Doge-20M")
1053
+ >>> tokenizer = AutoTokenizer.from_pretrained("SmallDoge/Doge-20M")
1054
 
1055
  >>> prompt = "Hey, are you conscious? Can you talk to me?"
1056
  >>> inputs = tokenizer(prompt, return_tensors="pt")
 
1082
  )
1083
 
1084
  hidden_states = outputs[0]
 
1085
  # only compute necessary logits, and do not upcast them to float if we are not computing the loss
1086
+ slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
1087
+ logits = self.lm_head(hidden_states[:, slice_indices, :])
1088
 
1089
  loss = None
1090
  if labels is not None:
 
1103
  )
1104
 
1105
 
 
 
1106
  @add_start_docstrings(
1107
  """
1108
  The Doge Model transformer with a sequence classification head on top (linear layer).
1109
 
1110
+ [`DogeForSequenceClassification`] uses the last token in order to do the classification, as other causal models
1111
+ (e.g. GPT-2) do.
1112
 
1113
+ Since it does classification on the last token, it requires knowing the position of the last token. If a
1114
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1115
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1116
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1117
+ each row of the batch).
1118
+ """,
1119
+ DOGE_START_DOCSTRING,
1120
  )
1121
  class DogeForSequenceClassification(DogePreTrainedModel):
1122
  def __init__(self, config: DogeConfig):
1123
  super().__init__(config)
 
1124
  self.num_labels = config.num_labels
1125
 
1126
  self.model = DogeModel(config)
1127
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
1128
+ self.config = config
1129
 
1130
  # Initialize weights and apply final processing
1131
+ self.post_init()
1132
 
1133
  def get_input_embeddings(self):
1134
  return self.model.word_embed
 
1152
  ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1153
  r"""
1154
  labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1155
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1156
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1157
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1158
  """
1159
  return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1160
 
1161
+ transformer_outputs = self.model(
1162
+ input_ids,
1163
  attention_mask=attention_mask,
1164
  position_ids=position_ids,
1165
  past_key_values=past_key_values,
 
1169
  output_hidden_states=output_hidden_states,
1170
  return_dict=return_dict,
1171
  )
1172
+ hidden_states = transformer_outputs[0]
1173
+ logits = self.score(hidden_states)
1174
 
1175
  if input_ids is not None:
1176
  batch_size = input_ids.shape[0]
 
1180
  if self.config.pad_token_id is None and batch_size != 1:
1181
  raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
1182
  if self.config.pad_token_id is None:
1183
+ last_non_pad_token = -1
1184
+ elif input_ids is not None:
1185
+ # To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id
1186
+ non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
1187
+ token_indices = torch.arange(input_ids.shape[-1], device=logits.device)
1188
+ last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
1189
  else:
1190
+ last_non_pad_token = -1
1191
+ logger.warning_once(
1192
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
1193
+ "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
1194
+ )
 
 
1195
 
1196
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]
1197
 
1198
  loss = None
1199
  if labels is not None:
1200
+ loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config)
 
 
 
 
 
1201
 
1202
  if not return_dict:
1203
+ output = (pooled_logits,) + transformer_outputs[1:]
1204
  return ((loss,) + output) if loss is not None else output
1205
 
1206
  return SequenceClassifierOutputWithPast(
1207
  loss=loss,
1208
  logits=pooled_logits,
1209
+ past_key_values=transformer_outputs.past_key_values,
1210
+ hidden_states=transformer_outputs.hidden_states,
1211
+ attentions=transformer_outputs.attentions,
1212
  )
1213
+
1214
+
1215
+ __all__ = ["DogeForCausalLM", "DogeModel", "DogePreTrainedModel", "DogeForSequenceClassification"]
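
A small standalone sketch of the pooling rule used in DogeForSequenceClassification.forward above, with a hypothetical right-padded batch; the logits are random stand-ins for the score head's output, and pad_token_id 2 matches the released config:

import torch

pad_token_id = 2
input_ids = torch.tensor([[5, 6, 7, 2, 2],
                          [8, 9, 2, 2, 2]])
logits = torch.randn(2, 5, 3)                 # (batch, seq_len, num_labels), hypothetical head output

# Rightmost non-padding token per row; this handles both left- and right-padding.
non_pad_mask = (input_ids != pad_token_id).int()
token_indices = torch.arange(input_ids.shape[-1])
last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)   # tensor([2, 1])

pooled_logits = logits[torch.arange(input_ids.shape[0]), last_non_pad_token]
print(pooled_logits.shape)                    # torch.Size([2, 3])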