# my_custom_olmoe/modeling_custom.py

import torch
import torch.nn as nn
import torch.nn.functional as F

# Import the official implementation (adjust the import path for your transformers version)
from transformers.models.olmoe.modeling_olmoe import OlmoeForCausalLM, OlmoeSparseMoeBlock
from .configuration_densebackward_olmoe0125 import DenseBackwardOLMoEConfig


class DenseBackwardOlmoeSparseMoeBlock(OlmoeSparseMoeBlock):
    """
    Inherits from the official OlmoeSparseMoeBlock and adds dense-backward behavior:
    the forward output stays identical to the official (sparse) computation, but
    during backpropagation a straight-through estimator passes gradients back
    through a dense computation, in which every expert is run on every token and
    the outputs are combined with the full routing weights.

    Inputs:
        hidden_states: Tensor, shape (batch_size, sequence_length, hidden_dim)
    Outputs:
        final_output: Tensor, shape (batch_size, sequence_length, hidden_dim)
        router_logits: Tensor, shape (batch_size * sequence_length, num_experts)
    """
    def forward_partscale_fixep_norm_dtch(self, hidden_states: torch.Tensor):
        """
        Variant `partscale_fixep_norm_dtch`: routing weights are partially rescaled
        (non-top-k positions use a detached normalization denominator), and expert
        gradients are masked to activated tokens via backward hooks.
        """
        batch_size, seq_length, hidden_dim = hidden_states.shape
        dtype = hidden_states.dtype
        device = hidden_states.device

        flat_hidden = hidden_states.view(-1, hidden_dim)  # (B*seq_len, hidden_dim)
        N_tokens = flat_hidden.size(0)

        # Compute router logits and full softmax routing weights
        router_logits = self.gate(flat_hidden).to(dtype=dtype)  # (B*L, num_experts)
        routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)  # (B*L, num_experts)

        # Select top-k experts
        routing_weights_topk, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
        if self.norm_topk_prob:
            norm_ratio = routing_weights_topk.sum(dim=-1, keepdim=True)
            # Normalize top-k routing weights
            routing_weights_topk = routing_weights_topk / norm_ratio
            # Only scale the selected top-k positions in routing_weights
            mask = F.one_hot(selected_experts, num_classes=self.num_experts).sum(dim=1).to(dtype)
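            # mask: (N_tokens, num_experts), 1.0 at each token's top-k experts, 0.0 elsewhere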
            # ------------------------------------ Choose Section ------------------------------------
            # current --> partscale_fix_expert implementation
            routing_weights = routing_weights * (1.0 - mask) / norm_ratio.detach() + routing_weights * mask / norm_ratio

            # should be --> the gated implementation: comment out the line above and uncomment the two lines below
            # gated = routing_weights.detach() * mask + (routing_weights - routing_weights.detach())
            # routing_weights = gated / gated.sum(dim=-1, keepdim=True)
            # ------------------------------------ Choose Section ------------------------------------
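            # In the partscale line above, top-k positions divide by the live
            # norm_ratio (gradient flows through the normalizer), while non-top-k
            # positions divide by norm_ratio.detach(), so their rescaling
            # contributes no gradient to the normalizer.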

        routing_weights_topk = routing_weights_topk.to(dtype=dtype)

        # Convert full routing_weights to consistent dtype for dense accumulation
        routing_weights = routing_weights.to(dtype=dtype)

        # Prepare accumulators: one for dense_outputs, one for sparse_outputs
        dense_outputs = torch.zeros((N_tokens, hidden_dim), dtype=dtype, device=device)
        sparse_outputs = torch.zeros((N_tokens, hidden_dim), dtype=dtype, device=device)

        # For mapping top-k positions when accumulating sparse_outputs
        # selected_experts: (N_tokens, top_k)

        for expert_idx in range(self.num_experts):
            expert_layer = self.experts[expert_idx]
            # Compute current expert output for all tokens
            expert_output = expert_layer(flat_hidden).to(dtype=dtype)  # (N_tokens, hidden_dim)
            activation_mask = (selected_experts == expert_idx).any(dim=1).float().unsqueeze(-1).to(dtype)
            if expert_output.requires_grad:
                expert_output.register_hook(lambda grad, mask=activation_mask: grad * mask)
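            # The hook zeroes gradients flowing into this expert for tokens that
            # did not route to it, so expert parameters are updated only by their
            # activated tokens; the router still receives gradient from all
            # tokens via the weight_full factor below.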
            # Dense accumulation: multiply by full routing weight and add
            weight_full = routing_weights[:, expert_idx].unsqueeze(-1)  # (N_tokens, 1)
            dense_outputs = dense_outputs + expert_output * weight_full

            # Sparse accumulation: find tokens where this expert is among top_k
            # matches: Boolean mask where selected_experts == expert_idx → shape (N_tokens, top_k)
            matches = (selected_experts == expert_idx)
            if matches.any():
                # locations: tuple of (token_indices, k_indices)
                token_indices, k_indices = torch.where(matches)
                # corresponding top-k weights
                weights_topk = routing_weights_topk[token_indices, k_indices].unsqueeze(-1)  # (num_matches, 1)
                # Accumulate sparse_outputs only for matched tokens
                sparse_outputs[token_indices] = sparse_outputs[token_indices] + expert_output[token_indices] * weights_topk

        # Combine sparse forward output and dense backward output
        final_flat = sparse_outputs.detach() + (dense_outputs - dense_outputs.detach())
        final_flat = final_flat.to(dtype=dtype)
        final_output = final_flat.view(batch_size, seq_length, hidden_dim)

        return final_output, router_logits
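
    # Minimal usage sketch for this method (hypothetical shapes; the block must be
    # initialized with the usual gate/experts/top_k attributes):
    #
    #   block = DenseBackwardOlmoeSparseMoeBlock(config)
    #   x = torch.randn(2, 8, hidden_dim, requires_grad=True)
    #   out, logits = block.forward_partscale_fixep_norm_dtch(x)
    #   out.sum().backward()  # gradients arrive via the dense path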
    # def forward(self, hidden_states: torch.Tensor):
    #     """
    #     Gated straight-through variant: π -> mask, with d(mask)/dπ = 1
    #     """
    #     batch_size, seq_length, hidden_dim = hidden_states.shape
    #     dtype = hidden_states.dtype
    #     device = hidden_states.device

    #     flat_hidden = hidden_states.view(-1, hidden_dim)  # (B*seq_len, hidden_dim)
    #     N_tokens = flat_hidden.size(0)

    #     # 1) router & softmax
    #     router_logits = self.gate(flat_hidden).to(dtype=dtype)                # (N, num_experts)
    #     routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)  # (N, num_experts)

    #     # 2) top-K selection
    #     _, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)  # (N, K), (N, K)
        
    #     # 3) build hard & ste masks
    #     mask_hard = F.one_hot(selected_experts, num_classes=self.num_experts).sum(dim=1).to(dtype)     # (N, num_experts)
    #     #mask_ste = mask_hard + (routing_weights - routing_weights.detach())
    #     mask_ste = mask_hard + (router_logits - router_logits.detach())

    #     # 4) compute gated weights = π * mask, then optionally renormalize
    #     gated = routing_weights * mask_ste                              # zero-out non-TopK
    #     if self.norm_topk_prob:
    #         norm_ratio = gated.sum(dim=-1, keepdim=True)                # (N,1)
    #         gated = gated / norm_ratio                                  # normalized TopK

    #     # 5) prepare accumulators
    #     dense_outputs = torch.zeros((N_tokens, hidden_dim), dtype=dtype, device=device)
    #     sparse_outputs = torch.zeros((N_tokens, hidden_dim), dtype=dtype, device=device)

    #     for expert_idx, expert_layer in enumerate(self.experts):
    #         expert_output = expert_layer(flat_hidden).to(dtype=dtype)  # (N_tokens, hidden_dim)
    #         activation_mask = (selected_experts == expert_idx).any(dim=1).float().unsqueeze(-1).to(dtype)

    #         if expert_output.requires_grad:
    #             expert_output.register_hook(lambda grad, mask=activation_mask: grad * mask)
            
    #         # a) Dense-STE backward uses gated weights
    #         weights = gated[:, expert_idx].unsqueeze(-1)  # (N_tokens, 1)
    #         dense_outputs += expert_output * weights

    #         # b) Sparse forward -- find tokens where this expert is among top_k (active experts)
    #         active = (selected_experts == expert_idx)
    #         if active.any():
    #             token_indices, _ = torch.where(active)
    #             weights_topk = gated[token_indices, expert_idx].unsqueeze(-1)  # (num_matches,1)
    #             sparse_outputs[token_indices] += expert_output[token_indices] * weights_topk
        
    #     # 6) STE mix: forward from sparse, backward from dense
    #     final_flat = sparse_outputs.detach() + (dense_outputs - dense_outputs.detach())
    #     final_output = final_flat.view(batch_size, seq_length, hidden_dim).to(dtype=dtype)

    #     return final_output, router_logits
    # def forward(self, hidden_states: torch.Tensor):
    #     batch_size, seq_length, hidden_dim = hidden_states.shape
    #     # Record the input dtype so all computations stay consistent
    #     dtype = hidden_states.dtype
    #     device = hidden_states.device

    #     flat_hidden = hidden_states.view(-1, hidden_dim)  # (B*seq_len, hidden_dim)
    #     N_tokens = flat_hidden.size(0)

    #     # Compute router logits
    #     router_logits = self.gate(flat_hidden)  # (B*seq_len, num_experts)
    #     # Make sure router_logits matches flat_hidden's dtype
    #     router_logits = router_logits.to(dtype=dtype)
    #     routing_weights = F.softmax(router_logits, dim=1, dtype=dtype)  # (B*seq_len, num_experts)

    #     # Select the top-k experts
    #     routing_weights_topk, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
    #     if self.norm_topk_prob:
    #         routing_weights_topk = routing_weights_topk / routing_weights_topk.sum(dim=-1, keepdim=True)
    #         # Keep the dtype consistent after normalization
    #         routing_weights_topk = routing_weights_topk.to(dtype=dtype)

    #     # ---------- Actually run every expert on every token (dense computation) ----------
    #     all_expert_outputs = torch.zeros((N_tokens, self.num_experts, hidden_dim),
    #                                      dtype=dtype, device=device)

    #     for expert_idx in range(self.num_experts):
    #         expert_layer = self.experts[expert_idx]
    #         # Run the current expert on all tokens
    #         expert_output = expert_layer(flat_hidden)  # (N_tokens, hidden_dim)
    #         # Activation mask for this expert: only activated tokens keep gradients
    #         activation_mask = (selected_experts == expert_idx).any(dim=1).float().unsqueeze(-1).to(dtype)
    #         # Register a gradient hook that zeroes gradients of non-activated tokens
    #         if expert_output.requires_grad:
    #             expert_output.register_hook(lambda grad, mask=activation_mask: grad * mask)
    #         # Make sure the expert output matches the expected dtype
    #         expert_output = expert_output.to(dtype=dtype)
    #         all_expert_outputs[:, expert_idx, :] = expert_output

    #     # ---------- Gather activated expert outputs (sparse forward) via batched indexing ----------
    #     # Build index tensors: first dim indexes tokens, second dim indexes experts
    #     token_indices = torch.arange(N_tokens, device=device).unsqueeze(1).expand(-1, self.top_k)
    #     batch_indices = token_indices.reshape(-1)
    #     expert_indices = selected_experts.reshape(-1)

    #     # Gather the activated experts' outputs in one batched lookup
    #     selected_outputs = all_expert_outputs[batch_indices, expert_indices].view(N_tokens, self.top_k, hidden_dim)

    #     # Expand the weights for batched multiplication
    #     expanded_weights = routing_weights_topk.unsqueeze(-1)  # (N_tokens, top_k, 1)
    #     expanded_weights = expanded_weights.to(dtype=dtype)

    #     # Multiply expert outputs by their weights and sum
    #     sparse_output = (selected_outputs * expanded_weights).sum(dim=1)  # (N_tokens, hidden_dim)

    #     # ---------- Dense aggregation (used for the backward pass) ----------
    #     # Combine all expert outputs with the full routing weights
    #     routing_weights_expanded = routing_weights.unsqueeze(-1)  # (N_tokens, num_experts, 1)
    #     routing_weights_expanded = routing_weights_expanded.to(dtype=dtype)
    #     dense_outputs = (all_expert_outputs * routing_weights_expanded).sum(dim=1)  # (N_tokens, hidden_dim)

    #     # ---------- Combine sparse forward with dense backward ----------
    #     # sparse_output.detach() supplies the forward value without gradient
    #     # (dense_outputs - dense_outputs.detach()) is zero in the forward pass but carries the dense gradients
    #     final_flat = sparse_output.detach() + (dense_outputs - dense_outputs.detach())
    #     final_flat = final_flat.to(dtype=dtype)  # keep the final output dtype consistent
    #     final_output = final_flat.view(batch_size, seq_length, hidden_dim)

    #     return final_output, router_logits

class DenseBackwardOLMoEForCausalLM(OlmoeForCausalLM):
    """
    Custom Olmoe ForCausalLM model that replaces the stock MoE modules with
    DenseBackwardOlmoeSparseMoeBlock to enable the dense-backward behavior.

    Config class: DenseBackwardOLMoEConfig
    """
    config_class = DenseBackwardOLMoEConfig
    base_model_prefix = "olmoe"

    def __init__(self, config):
        # Call the parent constructor first
        super().__init__(config)

        # Do not try to reassign self; load the pretrained model and copy its
        # state into the current instance instead
        pretrained_model = OlmoeForCausalLM.from_pretrained("allenai/OLMoE-1B-7B-0125")

        # Copy the pretrained model's state into the current model
        self.config = pretrained_model.config
        self.model = pretrained_model.model
        self.vocab_size = pretrained_model.vocab_size
        self.router_aux_loss_coef = pretrained_model.router_aux_loss_coef
        self.num_experts = pretrained_model.num_experts
        self.lm_head = pretrained_model.lm_head
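        # The submodules built by super().__init__ are replaced wholesale above,
        # so their randomly initialized weights are discarded in favor of the
        # pretrained ones.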
        
        # Walk over all decoder layers and replace each OlmoeSparseMoeBlock with
        # the DenseBackward version. This assumes the official model keeps its
        # decoder layers in self.model.layers and that an MoE layer's mlp module
        # can be identified by having a `gate` attribute.
        for layer in self.model.layers:
            if hasattr(layer.mlp, "gate"):
                orig_block = layer.mlp
                # Create the new block by copying the original attributes
                new_block = DenseBackwardOlmoeSparseMoeBlock(config)  # or other appropriate arguments
                # Then manually copy the attributes that must be shared:
                new_block.gate = orig_block.gate
                new_block.experts = orig_block.experts
                new_block.num_experts = orig_block.num_experts
                new_block.top_k = orig_block.top_k
                new_block.norm_topk_prob = orig_block.norm_topk_prob
                layer.mlp = new_block
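        # Note: each new block shares gate/experts with the pretrained block by
        # reference, so the swap does not duplicate parameter memory.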
        # Free the reference to the pretrained model
        del pretrained_model
        import gc
        gc.collect()
        torch.cuda.empty_cache()
        print("Original pretrained model released")

def main():
    config = DenseBackwardOLMoEConfig(  # official model parameters
        model_marker="DenseBackward_olmoe_marker",
    )
    # Instantiate the custom model
    model = DenseBackwardOLMoEForCausalLM(config)
    print(type(model))
    print(type(model.model))
    print(type(model.model.layers[0]))
    print(type(model.model.layers[0].mlp))
    print(type(model.model.layers[0].mlp.experts))

if __name__ == "__main__":
    main()
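
# A minimal smoke test one might run after building the model (a sketch: it
# assumes enough memory to load the full pretrained checkpoint, and the input
# sizes below are hypothetical):
#
#   model = DenseBackwardOLMoEForCausalLM(
#       DenseBackwardOLMoEConfig(model_marker="DenseBackward_olmoe_marker"))
#   input_ids = torch.randint(0, model.config.vocab_size, (1, 16))
#   out = model(input_ids=input_ids, labels=input_ids)
#   out.loss.backward()  # router gradients flow through the dense path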