neuralworm committed on
Commit 6695a01 · verified · 1 Parent(s): 0298169

Create model.py

Files changed (1)
  1. model.py +390 -0
model.py ADDED
@@ -0,0 +1,390 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import hashlib  # For generating deterministic values from the seed

# --- Helper: Entropy Estimator ---
class EntropyEstimator(nn.Module):
    def __init__(self, d_model, hidden_dim=32, name=""):  # Small hidden_dim for simplicity
        super().__init__()
        self.fc1 = nn.Linear(d_model, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, 1)
        self.name = name

    def forward(self, x, active_mask=None):  # x: (batch, seq_len, d_model)
        if x.numel() == 0:
            return torch.tensor(0.0, device=x.device)  # Empty tensor: nothing to estimate

        if active_mask is not None and x.shape[:-1] != active_mask.shape:
            # Mask shape does not line up with x; warn and fall back to averaging over all
            # elements. A more robust solution would ensure masks are always correct upstream.
            print(f"Warning [{self.name}]: x shape {x.shape[:-1]} and active_mask shape "
                  f"{active_mask.shape} mismatch. Entropy might be inaccurate.")
            active_mask = None

        if active_mask is None:
            h = F.relu(self.fc1(x.reshape(-1, x.size(-1))))
            return torch.sigmoid(self.fc2(h)).mean()

        # Mask is present and consistent: average only over active (non-padded) elements.
        x_masked = x[active_mask]
        if x_masked.numel() == 0:
            return torch.tensor(0.0, device=x.device)  # Everything masked out
        h = F.relu(self.fc1(x_masked))
        return torch.sigmoid(self.fc2(h)).mean()  # Mean entropy proxy over active elements

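# Illustrative usage sketch (the tensors below are hypothetical and not used anywhere else in
# this file): the estimator maps a (batch, seq_len, d_model) representation plus an optional
# boolean "active" mask (True = real token) to a scalar pseudo-entropy in [0, 1].
def _entropy_estimator_demo():
    est = EntropyEstimator(d_model=64, name="demo")
    x = torch.randn(2, 5, 64)                    # (batch, seq_len, d_model)
    active = torch.ones(2, 5, dtype=torch.bool)  # all positions active
    active[1, 3:] = False                        # pretend the last tokens of sample 1 are padding
    return est(x, active_mask=active)            # scalar tensor in [0, 1]
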
# --- Helper: Seed Parser ---
class SeedParser:
    def __init__(self, seed_phrase, seed_number_str, d_model, num_adaptive_blocks, num_sub_modules_per_block):
        self.seed_phrase = seed_phrase
        self.seed_number_str = seed_number_str
        self.d_model = d_model
        self.num_adaptive_blocks = num_adaptive_blocks
        self.num_sub_modules_per_block = num_sub_modules_per_block
        self.debug_prints_enabled = True

        print(f"--- SeedParser Initialization ---")
        print(f"  Seed Phrase: '{self.seed_phrase}'")
        print(f"  Seed Number: {self.seed_number_str}")

        # 1. Process the seed phrase: hash it to get a deterministic base value
        #    for all subsequent numerical derivations.
        phrase_hash = hashlib.sha256(seed_phrase.encode()).hexdigest()
        self.phrase_base_val = int(phrase_hash[:8], 16)  # Use the first 8 hex chars
        if self.debug_prints_enabled:
            print(f"  Phrase Base Value (from hash): {self.phrase_base_val}")

        # 2. Process the seed number (more direct influence on structure).
        self.num_sequence = [int(d) for d in seed_number_str if d.isdigit()]
        if not self.num_sequence:
            self.num_sequence = [0]  # Fallback
        if self.debug_prints_enabled:
            print(f"  Numerical Sequence (from seed number): {self.num_sequence}")

        self.init_map = self._generate_init_map()
        if self.debug_prints_enabled:
            print(f"  Generated InitMap:")
            for i, block_config in enumerate(self.init_map["block_configs"]):
                print(f"    Block {i}: Active Module Index: {block_config['active_module_idx']}, "
                      f"Target Entropy: {block_config['target_entropy']:.4f}, "
                      f"Gate Inits: {[f'{g:.2f}' for g in block_config['gate_inits']]}")
        print(f"--- SeedParser Initialized ---")

    def _get_deterministic_value(self, key_name, min_val, max_val, sequence_idx_offset=0):
        # Combine the phrase base value and the numerical sequence for more variation.
        combined_seed_val = self.phrase_base_val
        for i, num in enumerate(self.num_sequence):
            combined_seed_val += num * (10 ** (i + sequence_idx_offset))

        # Hash the key_name so the value is specific to the parameter being derived.
        key_hash = int(hashlib.sha256(key_name.encode()).hexdigest()[:8], 16)
        final_seed = combined_seed_val + key_hash

        # Simple mapping to the target range (not cryptographically strong, but deterministic).
        if max_val == min_val:
            return min_val  # Degenerate range: only one possible value
        return min_val + (final_seed % (max_val - min_val + 1))

    def _get_deterministic_float(self, key_name, min_val=0.0, max_val=1.0, sequence_idx_offset=0):
        combined_seed_val = self.phrase_base_val
        for i, num in enumerate(self.num_sequence):
            combined_seed_val += num * (10 ** (i + sequence_idx_offset))

        key_hash = int(hashlib.sha256(key_name.encode()).hexdigest()[:8], 16)
        final_seed = combined_seed_val + key_hash

        # Map to a float in [0, 1], then scale to [min_val, max_val].
        float_val = (final_seed % 1000001) / 1000000.0
        return min_val + float_val * (max_val - min_val)

    def _generate_init_map(self):
        init_map = {"block_configs": []}

        for i in range(self.num_adaptive_blocks):
            # Determine which sub-module is initially "more" active.
            active_module_idx = self._get_deterministic_value(
                f"block_{i}_active_module", 0, self.num_sub_modules_per_block - 1, sequence_idx_offset=i
            )

            # Determine initial gating values (normalized so they sum to 1).
            gate_inits_raw = [
                self._get_deterministic_float(f"block_{i}_gate_{j}_init_raw", 0.1, 1.0, sequence_idx_offset=i * 10 + j)
                for j in range(self.num_sub_modules_per_block)
            ]
            # Make the seed-selected gate stronger, then normalize.
            if self.num_sub_modules_per_block > 0:
                gate_inits_raw[active_module_idx] *= 2.0  # Boost the 'active' one
                sum_raw = sum(gate_inits_raw)
                gate_inits_normalized = ([g / sum_raw for g in gate_inits_raw] if sum_raw > 0
                                         else [1.0 / self.num_sub_modules_per_block] * self.num_sub_modules_per_block)
            else:
                gate_inits_normalized = []

            # Determine a target entropy for this block's output (moderate, non-zero).
            target_entropy = self._get_deterministic_float(
                f"block_{i}_target_entropy", 0.05, 0.3, sequence_idx_offset=i
            )

            init_map["block_configs"].append({
                "active_module_idx": active_module_idx,  # For initial bias
                "gate_inits": gate_inits_normalized,     # Initial values for learnable gates
                "target_entropy": target_entropy
            })
        return init_map

    def get_block_config(self, block_idx):
        if 0 <= block_idx < len(self.init_map["block_configs"]):
            return self.init_map["block_configs"][block_idx]
        return None

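# Illustrative usage sketch (the seed phrase and number below are placeholders chosen for this
# example, not values assumed elsewhere in the repo): the same phrase/number pair always yields
# the same per-block wiring configuration, which is the point of the seed parser.
def _seed_parser_demo():
    parser = SeedParser(
        seed_phrase="the cave builds itself",  # hypothetical phrase
        seed_number_str="54285142",            # hypothetical digit string
        d_model=64, num_adaptive_blocks=2, num_sub_modules_per_block=3,
    )
    return parser.get_block_config(0)  # dict with 'active_module_idx', 'gate_inits', 'target_entropy'
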
# --- Adaptive Block ---
class AdaptiveBlock(nn.Module):
    def __init__(self, d_model, n_heads, d_ff, dropout, seed_parser_config, block_idx, num_sub_modules=3):
        super().__init__()
        self.d_model = d_model
        self.block_idx = block_idx
        self.num_sub_modules = num_sub_modules
        self.config_from_seed = seed_parser_config  # dict for this block
        self.debug_prints_enabled = True

        if self.debug_prints_enabled:
            print(f"  Initializing AdaptiveBlock {self.block_idx} with seed config: {self.config_from_seed}")

        # Define potential sub-modules.
        self.sub_module_0 = nn.MultiheadAttention(d_model, n_heads, dropout=dropout, batch_first=True)
        self.sub_module_1 = nn.Sequential(
            nn.Linear(d_model, d_ff), nn.GELU(), nn.Dropout(dropout), nn.Linear(d_ff, d_model)
        )
        # Sub-module 2: a smaller FFN (near-identity refinement path).
        self.sub_module_2 = nn.Sequential(
            nn.Linear(d_model, d_model // 2), nn.GELU(), nn.Dropout(dropout), nn.Linear(d_model // 2, d_model)
        )
        # Add more diverse sub-modules here if `num_sub_modules_per_block` grows.

        self.sub_modules = nn.ModuleList([self.sub_module_0, self.sub_module_1, self.sub_module_2])

        if self.num_sub_modules > len(self.sub_modules):
            print(f"Warning: block {self.block_idx} requested {self.num_sub_modules} sub_modules, "
                  f"but only {len(self.sub_modules)} are defined. Using the defined ones.")
            self.num_sub_modules = len(self.sub_modules)

        # Learnable gates for combining/selecting sub-modules, initialized from the seed config.
        gate_initial_values = self.config_from_seed.get(
            "gate_inits",
            [1.0 / self.num_sub_modules] * self.num_sub_modules if self.num_sub_modules > 0 else []
        )
        if len(gate_initial_values) != self.num_sub_modules:  # Fallback if the seed parser gave the wrong number
            print(f"Warning: Block {self.block_idx} gate_inits length mismatch. Re-initializing uniformly.")
            gate_initial_values = [1.0 / self.num_sub_modules] * self.num_sub_modules if self.num_sub_modules > 0 else []

        self.gates = nn.Parameter(torch.tensor(gate_initial_values, dtype=torch.float32))

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)  # For the block's output
        self.dropout = nn.Dropout(dropout)
        self.output_entropy_estimator = EntropyEstimator(d_model, name=f"Block{block_idx}_OutEntropy")
        self.wiring_phase_active = False  # Set by the main model

    def set_wiring_phase(self, active):
        self.wiring_phase_active = active
        if self.debug_prints_enabled:
            state = "ACTIVATED" if active else "DEACTIVATED"
            print(f"  AdaptiveBlock {self.block_idx}: WIRING PHASE {state}")

    def forward(self, x, key_padding_mask=None, attn_mask=None):
        # key_padding_mask: (batch, seq_len) bool, True for padded keys (MHA convention).
        # attn_mask: (L, S) or (N*H, L, S) attention mask for MHA (e.g. causal); here L = S.
        gate_weights = F.softmax(self.gates, dim=0)  # Always defined, even on fallback paths
        if self.debug_prints_enabled:
            print(f"  AdaptiveBlock {self.block_idx} Input x: {x.shape}, "
                  f"Gates (softmax): {[f'{g.item():.3f}' for g in gate_weights]}")

        x_norm = self.norm1(x)

        outputs = []
        for i, module in enumerate(self.sub_modules):
            if i >= self.num_sub_modules:
                break  # Only use the configured number of sub-modules
            if i == 0:  # Multi-head self-attention
                module_out, _ = module(x_norm, x_norm, x_norm,
                                       key_padding_mask=key_padding_mask,
                                       attn_mask=attn_mask,
                                       need_weights=False)  # Attention weights not needed here
            else:  # FFN-like sub-modules
                module_out = module(x_norm)
            outputs.append(module_out)

        if not outputs:  # Should not happen if num_sub_modules > 0
            print(f"  AdaptiveBlock {self.block_idx}: No sub_modules processed. Passing input through.")
            final_out_unnorm = x
        else:
            # Gated combination: weighted sum of the sub-module outputs, plus a residual connection.
            stacked_outputs = torch.stack(outputs, dim=0)  # (num_sub_modules, B, S, D)
            weighted_sum = torch.sum(stacked_outputs * gate_weights.view(-1, 1, 1, 1), dim=0)
            final_out_unnorm = x + self.dropout(weighted_sum)

        final_out_norm = self.norm2(final_out_unnorm)

        # During the wiring phase, adjust gates based on the block's output entropy vs. its
        # seed-derived target. This is a deliberately simple "self-wiring" heuristic.
        current_output_entropy = self.output_entropy_estimator(
            final_out_norm, active_mask=~key_padding_mask if key_padding_mask is not None else None
        )
        target_entropy_for_block = self.config_from_seed.get("target_entropy", 0.1)  # Default target

        if self.wiring_phase_active and self.training and self.num_sub_modules >= 3:
            # Heuristic: the MHA path (module 0) tends to add complexity/entropy, while the FFN
            # paths (modules 1 and 2) tend to refine/stabilize. Nudge the raw gate logits accordingly.
            with torch.no_grad():  # Heuristic adjustment, not tracked by autograd
                entropy_diff = current_output_entropy - target_entropy_for_block
                adjustment_strength = 0.01  # Small adjustment per step
                if entropy_diff > 0.05:      # Entropy significantly above target
                    self.gates.data[1] += adjustment_strength
                    self.gates.data[2] += adjustment_strength
                    self.gates.data[0] -= adjustment_strength * 0.5  # Slightly decrease MHA
                elif entropy_diff < -0.05:   # Entropy significantly below target
                    self.gates.data[0] += adjustment_strength
                    self.gates.data[1] -= adjustment_strength * 0.5
                    self.gates.data[2] -= adjustment_strength * 0.5
                self.gates.data.clamp_(-2.0, 2.0)  # Keep raw gate logits in a sane range before softmax
            if self.debug_prints_enabled:
                print(f"  AdaptiveBlock {self.block_idx} WIRING: OutEnt={current_output_entropy.item():.4f}, "
                      f"TgtEnt={target_entropy_for_block:.4f}, Δ={entropy_diff.item():.4f} -> "
                      f"New Gates (raw): {[f'{g.item():.3f}' for g in self.gates.data]}")
        elif self.debug_prints_enabled:
            print(f"  AdaptiveBlock {self.block_idx} EXEC: OutEnt={current_output_entropy.item():.4f}, "
                  f"TgtEnt={target_entropy_for_block:.4f}")

        # Return the block's output, its estimated output entropy, and the current gate weights.
        return final_out_norm, current_output_entropy, gate_weights

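# Illustrative usage sketch (the config dict below is hand-written for demonstration only, not
# produced by SeedParser): an AdaptiveBlock can be exercised in isolation.
def _adaptive_block_demo():
    dummy_cfg = {"active_module_idx": 0, "gate_inits": [0.5, 0.3, 0.2], "target_entropy": 0.15}
    block = AdaptiveBlock(d_model=64, n_heads=4, d_ff=128, dropout=0.1,
                          seed_parser_config=dummy_cfg, block_idx=0, num_sub_modules=3)
    x = torch.randn(2, 5, 64)              # (batch, seq_len, d_model)
    out, block_entropy, gates = block(x)   # no padding mask, no attention mask
    return out.shape, block_entropy.item(), gates
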
# --- Positional Encoding ---
class PositionalEncoding(nn.Module):
    def __init__(self, d_model, dropout=0.1, max_len=512):  # Reduced max_len for this sketch
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)
        pe = torch.zeros(max_len, d_model)
        pos = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        div = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(pos * div)
        pe[:, 1::2] = torch.cos(pos * div)
        self.register_buffer('pe', pe.unsqueeze(0))  # (1, max_len, d_model)

    def forward(self, x):  # x: (batch, seq_len, d_model)
        x = x + self.pe[:, :x.size(1), :]
        return self.dropout(x)

# --- Main SWCK Model ---
class SWCKModel(nn.Module):
    def __init__(self, vocab_size, d_model, n_heads, d_ff, num_adaptive_blocks,
                 dropout, seed_phrase, seed_number_str, num_sub_modules_per_block=3):
        super().__init__()
        self.d_model = d_model
        self.seed_phrase = seed_phrase
        self.seed_number_str = seed_number_str
        self.debug_prints_enabled = True

        print(f"--- Initializing SWCKModel ---")
        self.seed_parser = SeedParser(seed_phrase, seed_number_str, d_model, num_adaptive_blocks, num_sub_modules_per_block)

        self.embedding = nn.Embedding(vocab_size, d_model)
        self.pos_encoder = PositionalEncoding(d_model, dropout)

        self.adaptive_blocks = nn.ModuleList()
        for i in range(num_adaptive_blocks):
            block_config = self.seed_parser.get_block_config(i)
            if block_config is None:
                raise ValueError(f"Could not get seed config for block {i}")
            self.adaptive_blocks.append(
                AdaptiveBlock(d_model, n_heads, d_ff, dropout, block_config, block_idx=i, num_sub_modules=num_sub_modules_per_block)
            )
            if self.debug_prints_enabled:
                print(f"  SWCKModel: Added AdaptiveBlock {i}")

        self.fc_out = nn.Linear(d_model, vocab_size)
        self.overall_output_entropy_estimator = EntropyEstimator(d_model, name="OverallOutEntropy")

        self._init_weights()
        print(f"--- SWCKModel Initialized ---")

    def _init_weights(self):
        initrange = 0.1
        self.embedding.weight.data.uniform_(-initrange, initrange)
        self.fc_out.bias.data.zero_()
        self.fc_out.weight.data.uniform_(-initrange, initrange)

    def set_wiring_phase(self, active):
        if self.debug_prints_enabled:
            print(f"SWCKModel: Setting wiring phase to {active} for all blocks.")
        for block in self.adaptive_blocks:
            block.set_wiring_phase(active)

    def forward(self, src_tokens, src_key_padding_mask=None):
        # src_tokens: (batch, seq_len)
        # src_key_padding_mask: (batch, seq_len), True for padded positions
        if self.debug_prints_enabled:
            print(f"\n--- SWCKModel Forward Pass ---")
            print(f"  Input src_tokens: {src_tokens.shape}")
            if src_key_padding_mask is not None:
                print(f"  Input src_key_padding_mask: {src_key_padding_mask.shape}")

        x = self.embedding(src_tokens) * math.sqrt(self.d_model)
        x = self.pos_encoder(x)
        if self.debug_prints_enabled:
            print(f"  After Embedding & PosEnc, x: {x.shape}")

        block_output_entropies = []
        block_gate_weights = []

        # This sketch treats the block stack as a general "processing core" with full self-attention,
        # so no top-level causal mask is built here. A decoder-style variant would generate a causal
        # attn_mask and pass it to each block; for now only the key padding mask is forwarded.
        for i, block in enumerate(self.adaptive_blocks):
            if self.debug_prints_enabled:
                print(f"  Processing AdaptiveBlock {i}...")
            x, block_entropy, gates = block(x, key_padding_mask=src_key_padding_mask, attn_mask=None)
            block_output_entropies.append(block_entropy)
            block_gate_weights.append(gates)
            if self.debug_prints_enabled:
                print(f"  Output x from AdaptiveBlock {i}: {x.shape}, Entropy: {block_entropy.item():.4f}")

        logits = self.fc_out(x)
        if self.debug_prints_enabled:
            print(f"  Output logits: {logits.shape}")

        # Overall entropy of the final representation (before fc_out), restricted to non-padded positions.
        final_active_mask = ~src_key_padding_mask if src_key_padding_mask is not None else None
        overall_entropy = self.overall_output_entropy_estimator(x, active_mask=final_active_mask)
        if self.debug_prints_enabled:
            print(f"  Overall Final Representation Entropy: {overall_entropy.item():.4f}")

        # Per-block entropies, overall output entropy, and gate weights for regularization/logging.
        entropy_report = {
            "block_output_entropies": block_output_entropies,  # List of scalar tensors
            "overall_output_entropy": overall_entropy,         # Scalar tensor
            "block_gate_weights": block_gate_weights           # List of tensors
        }

        return logits, entropy_report
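
# Minimal end-to-end smoke test (the hyperparameters and seed strings below are illustrative
# placeholders, not the values this model is trained with elsewhere): builds a tiny SWCKModel,
# runs one forward pass, and shows how the entropy report could feed an auxiliary objective.
if __name__ == "__main__":
    model = SWCKModel(
        vocab_size=100, d_model=64, n_heads=4, d_ff=128, num_adaptive_blocks=2,
        dropout=0.1, seed_phrase="the cave builds itself", seed_number_str="54285142",
        num_sub_modules_per_block=3,
    )
    model.set_wiring_phase(True)  # enable the heuristic gate adjustments
    model.train()

    tokens = torch.randint(0, 100, (2, 10))          # (batch, seq_len)
    pad_mask = torch.zeros(2, 10, dtype=torch.bool)  # True marks padded positions
    pad_mask[1, 7:] = True

    logits, report = model(tokens, src_key_padding_mask=pad_mask)
    print("logits:", logits.shape)                   # (2, 10, 100)

    # One possible auxiliary term: pull each block's output entropy toward its seed-derived target.
    entropy_penalty = sum(
        (ent - blk.config_from_seed["target_entropy"]).abs()
        for ent, blk in zip(report["block_output_entropies"], model.adaptive_blocks)
    )
    print("entropy penalty:", entropy_penalty.item())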