import torch
import torch.nn as nn
from torch.nn import functional as F
from dataclasses import dataclass
from transformers import AutoTokenizer
import gradio as gr
import zipfile
import io


# Model definition: a LLaMA-style decoder-only transformer (SmolLM2).
class LlamaRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.eps = eps

    def forward(self, x):
        # Normalize by the root-mean-square of the activations, then scale.
        rms = torch.sqrt(torch.mean(x * x, dim=-1, keepdim=True) + self.eps)
        x_norm = x / rms
        return self.weight * x_norm


class LlamaRotaryEmbedding(nn.Module):
    def __init__(self, dim, max_position_embeddings=2048, base=10000):
        super().__init__()
        inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
        self.register_buffer("inv_freq", inv_freq)
        self.max_position_embeddings = max_position_embeddings
        self.dim = dim

    def forward(self, x, seq_len):
        t = torch.arange(seq_len, device=x.device).type_as(self.inv_freq)
        freqs = torch.einsum("i,j->ij", t, self.inv_freq)
        emb = torch.cat((freqs, freqs), dim=-1)
        return emb


def rotate_half(x):
    x1, x2 = x[..., :x.shape[-1] // 2], x[..., x.shape[-1] // 2:]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
    cos = cos.unsqueeze(0).unsqueeze(0)
    sin = sin.unsqueeze(0).unsqueeze(0)
    cos = cos.expand(q.shape[0], q.shape[1], -1, -1)
    sin = sin.expand(k.shape[0], k.shape[1], -1, -1)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


class LlamaSdpaAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.hidden_size = config.n_embd
        self.num_heads = config.n_head
        self.head_dim = config.n_embd // config.n_head
        # Grouped-query attention: fewer key/value heads than query heads.
        self.num_key_value_heads = config.num_key_value_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
        self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
        self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
        self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
        self.rotary_emb = LlamaRotaryEmbedding(
            self.head_dim,
            max_position_embeddings=config.block_size,
            base=config.rope_theta,
        )

    def forward(self, x, attention_mask=None):
        B, T, C = x.size()
        q = self.q_proj(x).view(B, T, self.num_heads, self.head_dim)
        k = self.k_proj(x).view(B, T, self.num_key_value_heads, self.head_dim)
        v = self.v_proj(x).view(B, T, self.num_key_value_heads, self.head_dim)

        # Repeat key/value heads so every query head has a matching k/v head.
        k = k.repeat_interleave(self.num_key_value_groups, dim=2)
        v = v.repeat_interleave(self.num_key_value_groups, dim=2)

        q = q.transpose(1, 2)
        k = k.transpose(1, 2)
        v = v.transpose(1, 2)

        rotary_emb = self.rotary_emb(x, T)
        cos, sin = rotary_emb.cos(), rotary_emb.sin()
        q, k = apply_rotary_pos_emb(q, k, cos, sin, None)

        out = F.scaled_dot_product_attention(q, k, v, is_causal=True)
        out = out.transpose(1, 2).contiguous().view(B, T, C)
        return self.o_proj(out)


class LlamaMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.gate_proj = nn.Linear(config.n_embd, config.intermediate_size, bias=False)
        self.up_proj = nn.Linear(config.n_embd, config.intermediate_size, bias=False)
        self.down_proj = nn.Linear(config.intermediate_size, config.n_embd, bias=False)
        self.act_fn = nn.SiLU()

    def forward(self, x):
        # SwiGLU-style feed-forward: gated activation, then projection back down.
        return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))


class LlamaDecoderLayer(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.input_layernorm = LlamaRMSNorm(config.n_embd)
        self.self_attn = LlamaSdpaAttention(config)
        self.post_attention_layernorm = LlamaRMSNorm(config.n_embd)
        self.mlp = LlamaMLP(config)

    def forward(self, x):
        # Pre-norm residual blocks: attention, then MLP.
        residual = x
        x = self.input_layernorm(x)
        x = self.self_attn(x)
        x = residual + x

        residual = x
        x = self.post_attention_layernorm(x)
        x = self.mlp(x)
        x = residual + x
        return x


@dataclass
class SmolLM2Config:
    block_size: int = 2048
    vocab_size: int = 49152
    n_layer: int = 30
    n_head: int = 9
    n_embd: int = 576
    intermediate_size: int = 1536
    num_key_value_heads: int = 3
    rms_norm_eps: float = 1e-5
    rope_theta: float = 10000.0
    initializer_range: float = 0.041666666666666664
    use_cache: bool = True


class SmolLM2(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.embed_tokens = nn.Embedding(config.vocab_size, config.n_embd)
        self.layers = nn.ModuleList([LlamaDecoderLayer(config) for _ in range(config.n_layer)])
        self.norm = LlamaRMSNorm(config.n_embd, eps=config.rms_norm_eps)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        # Tie the input embedding and output projection weights.
        self.embed_tokens.weight = self.lm_head.weight
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Linear):
            torch.nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)

    def forward(self, idx, targets=None):
        B, T = idx.size()
        x = self.embed_tokens(idx)
        for layer in self.layers:
            x = layer(x)
        x = self.norm(x)
        logits = self.lm_head(x)

        loss = None
        if targets is not None:
            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
        return logits, loss

    def generate(self, idx, max_new_tokens, temperature=1.0, top_k=None):
        for _ in range(max_new_tokens):
            # Crop the context to the model's maximum block size.
            idx_cond = idx[:, -self.config.block_size:]
            logits, _ = self(idx_cond)
            logits = logits[:, -1, :] / temperature
            if top_k is not None:
                # Keep only the top-k logits; mask the rest to -inf before sampling.
                v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
                logits[logits < v[:, [-1]]] = float('-inf')
            probs = F.softmax(logits, dim=-1)
            idx_next = torch.multinomial(probs, num_samples=1)
            idx = torch.cat((idx, idx_next), dim=1)
        return idx


# Initialize model and tokenizer
device = 'cuda' if torch.cuda.is_available() else 'cpu'
tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/cosmo2-tokenizer")
model = SmolLM2(SmolLM2Config())


# Load trained weights from a zip archive containing 'checkpoint.pt'.
def load_checkpoint_from_zip(zip_path, device):
    try:
        with zipfile.ZipFile(zip_path, 'r') as zf:
            with zf.open('checkpoint.pt') as f:
                buffer = io.BytesIO(f.read())
                checkpoint = torch.load(buffer, map_location=device)
                if isinstance(checkpoint, dict) and 'model_state_dict' in checkpoint:
                    return checkpoint['model_state_dict']
                return checkpoint
    except Exception as e:
        print(f"Error loading checkpoint: {e}")
        return None


# Try to load the checkpoint
try:
    checkpoint_path = 'checkpoint_step_5000.zip'  # or a .pt file
    if checkpoint_path.endswith('.zip'):
        state_dict = load_checkpoint_from_zip(checkpoint_path, device)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)
        state_dict = checkpoint['model_state_dict'] if isinstance(checkpoint, dict) else checkpoint
    if state_dict is not None:
        model.load_state_dict(state_dict)
        print("Checkpoint loaded successfully")
except Exception as e:
    print(f"Error loading checkpoint: {e}")
    print("Initializing model with random weights")

model.to(device)
model.eval()


def generate_text(prompt, max_length=20, temperature=0.7, top_k=50):
    """Generate text from a prompt."""
    input_ids = tokenizer.encode(prompt, return_tensors='pt').to(device)
    with torch.no_grad():
        output_ids = model.generate(
            input_ids,
            max_new_tokens=max_length,
            temperature=temperature,
            top_k=top_k
        )
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)


# Gradio interface
def gradio_interface(prompt, max_length, temperature, top_k):
    return generate_text(prompt, int(max_length), float(temperature), int(top_k))


iface = gr.Interface(
    fn=gradio_interface,
    inputs=[
        gr.Textbox(label="Prompt", placeholder="Enter your prompt here..."),
        gr.Slider(minimum=10, maximum=500, value=100, step=10, label="Max Length"),
        gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=1, maximum=100, value=50, step=1, label="Top K"),
    ],
    outputs=gr.Textbox(label="Generated Text"),
    title="SmolLM2 Text Generation",
    description="Generate Shakespeare-style text using the SmolLM2 model",
    examples=[
        ["To be, or not to be:", 50, 0.7, 50],
        ["O Romeo, Romeo,", 40, 0.8, 40],
        ["All the world's a stage,", 60, 0.9, 45],
        ["Friends, Romans, countrymen,", 45, 0.7, 50],
        ["Now is the winter of", 55, 0.8, 40],
        ["If music be the food of love,", 50, 0.9, 45],
    ],
    article="""
### Example Prompts:
1. **Hamlet's Soliloquy Style**:
   - "To be, or not to be:"
   - "What dreams may come when"
2. **Romeo and Juliet Style**:
   - "O Romeo, Romeo,"
   - "But soft, what light"
3. **Macbeth Style**:
   - "Double, double toil and"
   - "Is this a dagger which"
4. **Sonnets Style**:
   - "Shall I compare thee to"
   - "When in disgrace with fortune"

### Tips:
- Use higher temperature (0.8-0.9) for more creative outputs
- Use lower temperature (0.6-0.7) for more focused text
- Adjust max length based on your needs (20-100 tokens)
"""
)

if __name__ == "__main__":
    iface.launch()
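
# Optional smoke test: a minimal sketch of calling generate_text() directly,
# without starting the Gradio UI. It assumes this script is saved as app.py
# and that a checkpoint (or random weights) has been loaded above.
#
#   >>> from app import generate_text
#   >>> print(generate_text("To be, or not to be:", max_length=30, temperature=0.7, top_k=50))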