from dataclasses import dataclass

import torch
import torch.nn as nn
from torch.nn import functional as F


class LlamaRMSNorm(nn.Module):
    """Root-mean-square layer norm (no mean subtraction, no bias)."""

    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.eps = eps

    def forward(self, x):
        # Normalize by the RMS over the hidden dimension, then apply a learned scale.
        rms = torch.sqrt(torch.mean(x * x, dim=-1, keepdim=True) + self.eps)
        x_norm = x / rms
        return self.weight * x_norm


class LlamaRotaryEmbedding(nn.Module):
    """Computes rotary position-embedding angles for a given sequence length."""

    def __init__(self, dim, max_position_embeddings=2048, base=10000):
        super().__init__()
        # Inverse frequencies for each pair of dimensions: base^(-2i/dim).
        inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
        self.register_buffer("inv_freq", inv_freq)
        self.max_position_embeddings = max_position_embeddings
        self.dim = dim

    def forward(self, x, seq_len):
        # Angles for positions 0..seq_len-1, duplicated so the last dim matches head_dim.
        t = torch.arange(seq_len, device=x.device).type_as(self.inv_freq)
        freqs = torch.einsum("i,j->ij", t, self.inv_freq)
        emb = torch.cat((freqs, freqs), dim=-1)  # (seq_len, dim)
        return emb


def rotate_half(x):
    """Rotates the two halves of the last dimension: (x1, x2) -> (-x2, x1)."""
    x1, x2 = x[..., : x.shape[-1] // 2], x[..., x.shape[-1] // 2:]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None):
    # cos/sin: (seq_len, head_dim) -> (B, H, seq_len, head_dim) for broadcasting with q/k.
    # position_ids is unused here; positions are assumed to be 0..seq_len-1.
    cos = cos.unsqueeze(0).unsqueeze(0).expand(q.shape[0], q.shape[1], -1, -1)
    sin = sin.unsqueeze(0).unsqueeze(0).expand(k.shape[0], k.shape[1], -1, -1)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


class LlamaSdpaAttention(nn.Module):
    """Grouped-query attention built on torch's scaled_dot_product_attention."""

    def __init__(self, config):
        super().__init__()
        self.hidden_size = config.n_embd
        self.num_heads = config.n_head
        self.head_dim = config.n_embd // config.n_head
        # Grouped-query attention: fewer key/value heads than query heads.
        self.num_key_value_heads = config.num_key_value_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
        self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
        self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
        self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
        self.rotary_emb = LlamaRotaryEmbedding(
            self.head_dim,
            max_position_embeddings=config.block_size,
            base=config.rope_theta,
        )

    def forward(self, x, attention_mask=None):
        B, T, C = x.size()
        q = self.q_proj(x).view(B, T, self.num_heads, self.head_dim)
        k = self.k_proj(x).view(B, T, self.num_key_value_heads, self.head_dim)
        v = self.v_proj(x).view(B, T, self.num_key_value_heads, self.head_dim)
        # Repeat K/V heads so every query head has a matching key/value head.
        k = k.repeat_interleave(self.num_key_value_groups, dim=2)
        v = v.repeat_interleave(self.num_key_value_groups, dim=2)
        # (B, T, H, D) -> (B, H, T, D)
        q = q.transpose(1, 2)
        k = k.transpose(1, 2)
        v = v.transpose(1, 2)
        # Rotary position embeddings applied to queries and keys.
        rotary_emb = self.rotary_emb(x, T)
        cos, sin = rotary_emb.cos(), rotary_emb.sin()
        q, k = apply_rotary_pos_emb(q, k, cos, sin)
        # Fused causal attention.
        out = F.scaled_dot_product_attention(q, k, v, is_causal=True)
        out = out.transpose(1, 2).contiguous().view(B, T, C)
        return self.o_proj(out)


class LlamaMLP(nn.Module):
    """SwiGLU feed-forward block: down(silu(gate(x)) * up(x))."""

    def __init__(self, config):
        super().__init__()
        self.gate_proj = nn.Linear(config.n_embd, config.intermediate_size, bias=False)
        self.up_proj = nn.Linear(config.n_embd, config.intermediate_size, bias=False)
        self.down_proj = nn.Linear(config.intermediate_size, config.n_embd, bias=False)
        self.act_fn = nn.SiLU()

    def forward(self, x):
        return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))


class LlamaDecoderLayer(nn.Module):
    """Pre-norm transformer block: attention and MLP, each with a residual connection."""

    def __init__(self, config):
        super().__init__()
        self.input_layernorm = LlamaRMSNorm(config.n_embd, eps=config.rms_norm_eps)
        self.self_attn = LlamaSdpaAttention(config)
        self.post_attention_layernorm = LlamaRMSNorm(config.n_embd, eps=config.rms_norm_eps)
        self.mlp = LlamaMLP(config)

    def forward(self, x):
        # Attention sub-layer.
        residual = x
        x = self.input_layernorm(x)
        x = self.self_attn(x)
        x = residual + x
        # MLP sub-layer.
        residual = x
        x = self.post_attention_layernorm(x)
        x = self.mlp(x)
        x = residual + x
        return x


@dataclass
class SmolLM2Config:
    block_size: int = 2048
    vocab_size: int = 49152
    n_layer: int = 30
    n_head: int = 9
    n_embd: int = 576
    intermediate_size: int = 1536
    num_key_value_heads: int = 3
    rms_norm_eps: float = 1e-5
    rope_theta: float = 10000.0
    initializer_range: float = 0.041666666666666664
    use_cache: bool = True


class SmolLM2(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.embed_tokens = nn.Embedding(config.vocab_size, config.n_embd)
        self.layers = nn.ModuleList([LlamaDecoderLayer(config) for _ in range(config.n_layer)])
        self.norm = LlamaRMSNorm(config.n_embd, eps=config.rms_norm_eps)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        # Tie the input embedding and output projection weights.
        self.embed_tokens.weight = self.lm_head.weight
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Linear):
            torch.nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)

    def forward(self, idx, targets=None):
        B, T = idx.size()
        x = self.embed_tokens(idx)
        for layer in self.layers:
            x = layer(x)
        x = self.norm(x)
        logits = self.lm_head(x)
        loss = None
        if targets is not None:
            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
        return logits, loss

    @torch.no_grad()
    def generate(self, idx, max_new_tokens, temperature=1.0, top_k=None):
        for _ in range(max_new_tokens):
            # Crop the context to the model's maximum sequence length.
            idx_cond = idx[:, -self.config.block_size:]
            logits, _ = self(idx_cond)
            # Take the logits at the last position and apply temperature.
            logits = logits[:, -1, :] / temperature
            if top_k is not None:
                # Keep only the top-k logits; mask the rest before sampling.
                v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
                logits[logits < v[:, [-1]]] = float('-inf')
            probs = F.softmax(logits, dim=-1)
            idx_next = torch.multinomial(probs, num_samples=1)
            idx = torch.cat((idx, idx_next), dim=1)
        return idx
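

# --- Minimal usage sketch (assumption: not part of the original file) ---
# A hypothetical smoke test showing how the pieces above fit together:
# build the default SmolLM2Config, instantiate SmolLM2, compute a training
# loss on random token IDs, and sample a few tokens with generate().
# The token IDs are random placeholders, not real tokenizer output.
if __name__ == "__main__":
    config = SmolLM2Config()
    model = SmolLM2(config)
    model.eval()

    # Random batch of token IDs: (batch_size=2, sequence_length=16).
    idx = torch.randint(0, config.vocab_size, (2, 16))

    # Forward pass with next-token targets returns (logits, loss).
    logits, loss = model(idx[:, :-1], targets=idx[:, 1:])
    print("logits:", tuple(logits.shape), "loss:", loss.item())

    # Autoregressive sampling extends each sequence by 8 tokens.
    out = model.generate(idx, max_new_tokens=8, temperature=1.0, top_k=50)
    print("generated:", tuple(out.shape))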