{ "nbformat": 4, "nbformat_minor": 0, "metadata": { "colab": { "provenance": [], "gpuType": "T4" }, "kernelspec": { "name": "python3", "display_name": "Python 3" }, "language_info": { "name": "python" }, "accelerator": "GPU" }, "cells": [ { "cell_type": "markdown", "source": [ "## Building a GPT\n", "\n", "Companion notebook to the [Zero To Hero](https://karpathy.ai/zero-to-hero.html) video on GPT." ], "metadata": { "id": "wJpXpmjEYC_T" } }, { "cell_type": "code", "source": [ "!pip install -q python-docx\n" ], "metadata": { "id": "Rd8lAG81GIZR" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "import docx\n", "import re\n", "\n", "# Replace 'your_file.docx' with your file path\n", "doc_path = '/content/Shahname Ferdowsi.docx'\n", "\n", "def read_docx(file_path):\n", " doc = docx.Document(file_path)\n", " text = []\n", " for para in doc.paragraphs:\n", " text.append(para.text)\n", " return '\\n'.join(text)\n", "\n", "# Read the .docx file\n", "content = read_docx(doc_path)\n", "\n", "# Remove English alphabets using regex\n", "content_without_english = re.sub('[a-zA-Z]', '', content)\n", "\n", "text = content_without_english\n" ], "metadata": { "id": "O6medjfRsLD9" }, "execution_count": 1, "outputs": [] }, { "cell_type": "code", "source": [ "print(\"length of dataset in characters: \", len(text))" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "6xWI_VyAsN8F", "outputId": "d703a4c4-8318-4a65-a48a-c51c94deb4c8" }, "execution_count": 2, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "length of dataset in characters: 3867092\n" ] } ] }, { "cell_type": "code", "source": [ "# let's look at the first 1000 characters\n", "print(text[:1000])" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "2c5V0FvqseE0", "outputId": "de14fbee-c5d0-4ef9-95d3-23ab5d96edad" }, "execution_count": 3, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "\n", "\n", "آغاز كتاب‏\n", " بنام خداوند جان و خرد \t \t كزين برتر انديشه بر نگذرد\n", " خداوند نام و خداوند جاى \t\t خداوند روزى‏ده رهنماى‏\n", " خداوند كيوان و گردان سپهر \t فروزنده ماه و ناهيد و مهر\n", " ز نام و نشان و گمان برترست \t \t نگارنده برشده پيكرست‏\n", " به بينندگان آفريننده را \t \t نبينى مرنجان دو بيننده را\n", " نيابد بدو نيز انديشه راه \t\t كه او برتر از نام و از جايگاه‏\n", " سخن هر چه زين گوهران بگذرد \t نيابد بدو راه جان و خرد\n", " خرد گر سخن برگزيند همى \t همان را گزيند كه بيند همى‏\n", " ستودن نداند كس او را چو هست \t ميان بندگى را ببايدت بست‏\n", " خرد را و جان را همى سنجد اوى در انديشۀ سخته كى گنجد اوى‏\n", " بدين آلت راى و جان و زبان \t \t ستود آفريننده را كى توان‏\n", " به هستيش بايد كه خستو شوى \t ز گفتار بى‏كار يك سو شوى‏\n", " پرستنده باشى و جوينده راه \t بژرفى بفرمانش كردن نگاه‏\n", " توانا بود هر كه دانا بود \n" ] } ] }, { "cell_type": "code", "source": [ "# here are all the unique characters that occur in this text\n", "chars = sorted(list(set(text)))\n", "vocab_size = len(chars)\n", "print(''.join(chars))\n", "print(vocab_size)" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "0e-Rbyr8sfM8", "outputId": "5742a07a-c567-465c-8ba4-520eec8dbeef" }, "execution_count": 4, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "\t\n", " &()*-0123456789:[]،؟ءآأؤئابتثجحخدذرزسشصضطظعغفقكلمنهوىيَُِّْپچژکگۀی‏\n", "70\n" ] } ] }, { "cell_type": "code", "source": [ "# create a mapping from characters to integers\n", "stoi = { ch:i for i,ch in enumerate(chars) }\n", "itos = { i:ch for i,ch in 
enumerate(chars) }\n", "encode = lambda s: [stoi[c] for c in s] # encoder: take a string, output a list of integers\n", "decode = lambda l: ''.join([itos[i] for i in l]) # decoder: take a list of integers, output a string\n", "\n", "print(encode(\"سلااام چطوری\"))\n", "print(decode(encode(\"سلااام چطوری\")))" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "Yw1LKNCgwjj1", "outputId": "717375fd-ece5-49fa-f0f4-97b215c1dc5a" }, "execution_count": 5, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "[39, 50, 28, 28, 28, 51, 2, 63, 43, 54, 37, 68]\n", "سلااام چطوری\n" ] } ] }, { "cell_type": "code", "source": [ "# let's now encode the entire text dataset and store it into a torch.Tensor\n", "import torch # we use PyTorch: https://pytorch.org\n", "data = torch.tensor(encode(text), dtype=torch.long)\n", "print(data.shape, data.dtype)\n", "print(data[:1000]) # the 1000 characters we looked at earlier will look like this to the GPT" ], "metadata": { "id": "YJb0OXPwzvqg" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "# Let's now split up the data into train and validation sets\n", "n = int(0.9*len(data)) # first 90% will be train, rest val\n", "train_data = data[:n]\n", "val_data = data[n:]" ], "metadata": { "id": "f_WIXqxz0lU5" }, "execution_count": 8, "outputs": [] }, { "cell_type": "code", "source": [ "block_size = 8\n", "train_data[:block_size+1]" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "TD5Bj8Y6IAD4", "outputId": "fef174ac-01f6-4043-ee46-d3d59fdba345" }, "execution_count": 9, "outputs": [ { "output_type": "execute_result", "data": { "text/plain": [ "tensor([ 1, 1, 24, 46, 28, 38, 2, 49, 30])" ] }, "metadata": {}, "execution_count": 9 } ] }, { "cell_type": "code", "source": [ "x = train_data[:block_size]\n", "y = train_data[1:block_size+1]\n", "for t in range(block_size):\n", " context = x[:t+1]\n", " target = y[t]\n", " print(f\"when input is {context} the target: {target}\")" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "9HXDe8vGJCEn", "outputId": "2f223db6-2278-43fe-c4b0-1353dddfe538" }, "execution_count": 10, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "when input is tensor([1]) the target: 1\n", "when input is tensor([1, 1]) the target: 24\n", "when input is tensor([ 1, 1, 24]) the target: 46\n", "when input is tensor([ 1, 1, 24, 46]) the target: 28\n", "when input is tensor([ 1, 1, 24, 46, 28]) the target: 38\n", "when input is tensor([ 1, 1, 24, 46, 28, 38]) the target: 2\n", "when input is tensor([ 1, 1, 24, 46, 28, 38, 2]) the target: 49\n", "when input is tensor([ 1, 1, 24, 46, 28, 38, 2, 49]) the target: 30\n" ] } ] }, { "cell_type": "code", "source": [ "torch.manual_seed(1337)\n", "batch_size = 4 # how many independent sequences will we process in parallel?\n", "block_size = 8 # what is the maximum context length for predictions?\n", "\n", "def get_batch(split):\n", " # generate a small batch of data of inputs x and targets y\n", " data = train_data if split == 'train' else val_data\n", " ix = torch.randint(len(data) - block_size, (batch_size,))\n", " x = torch.stack([data[i:i+block_size] for i in ix])\n", " y = torch.stack([data[i+1:i+block_size+1] for i in ix])\n", " return x, y\n", "\n", "xb, yb = get_batch('train')\n", "print('inputs:')\n", "print(xb.shape)\n", "print(xb)\n", "print('targets:')\n", "print(yb.shape)\n", "print(yb)\n", "\n", "print('----')\n", "\n", "for b in range(batch_size): # batch dimension\n", 
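"  # (added) every (b, t) position is an independent training example:\n", "  # the first t+1 tokens of row b form the context that predicts yb[b, t],\n", "  # so one batch packs batch_size * block_size = 32 examples into two tensors\n", 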
" for t in range(block_size): # time dimension\n", " context = xb[b, :t+1]\n", " target = yb[b,t]\n", " print(f\"when input is {context.tolist()} the target: {target}\")" ], "metadata": { "id": "Q3k1Czf7LuA9" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "print(xb) # our input to the transformer" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "qpyyAeIzQjlO", "outputId": "b4ac6055-9b61-42fa-e1e6-0f957abe5bcd" }, "execution_count": 12, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "tensor([[30, 37, 28, 2, 29, 34, 30, 2],\n", " [51, 2, 40, 28, 62, 54, 37, 2],\n", " [ 2, 2, 2, 49, 53, 2, 37, 40],\n", " [35, 52, 35, 2, 66, 37, 35, 28]])\n" ] } ] }, { "cell_type": "code", "source": [ "import torch\n", "import torch.nn as nn\n", "from torch.nn import functional as F\n", "torch.manual_seed(1337)\n", "\n", "class BigramLanguageModel(nn.Module):\n", "\n", " def __init__(self, vocab_size):\n", " super().__init__()\n", " # each token directly reads off the logits for the next token from a lookup table\n", " self.token_embedding_table = nn.Embedding(vocab_size, vocab_size)\n", "\n", " def forward(self, idx, targets=None):\n", "\n", " # idx and targets are both (B,T) tensor of integers\n", " logits = self.token_embedding_table(idx) # (B,T,C)\n", "\n", " if targets is None:\n", " loss = None\n", " else:\n", " B, T, C = logits.shape\n", " logits = logits.view(B*T, C)\n", " targets = targets.view(B*T)\n", " loss = F.cross_entropy(logits, targets)\n", "\n", " return logits, loss\n", "\n", " def generate(self, idx, max_new_tokens):\n", " # idx is (B, T) array of indices in the current context\n", " for _ in range(max_new_tokens):\n", " # get the predictions\n", " logits, loss = self(idx)\n", " # focus only on the last time step\n", " logits = logits[:, -1, :] # becomes (B, C)\n", " # apply softmax to get probabilities\n", " probs = F.softmax(logits, dim=-1) # (B, C)\n", " # sample from the distribution\n", " idx_next = torch.multinomial(probs, num_samples=1) # (B, 1)\n", " # append sampled index to the running sequence\n", " idx = torch.cat((idx, idx_next), dim=1) # (B, T+1)\n", " return idx\n", "\n", "m = BigramLanguageModel(vocab_size)\n", "logits, loss = m(xb, yb)\n", "print(logits.shape)\n", "print(loss)\n", "\n", "print(decode(m.generate(idx = torch.zeros((1, 1), dtype=torch.long), max_new_tokens=100)[0].tolist()))\n" ], "metadata": { "id": "nql_1ER53oCf" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "# create a PyTorch optimizer\n", "optimizer = torch.optim.AdamW(m.parameters(), lr=1e-3)" ], "metadata": { "id": "eTyJ8qAaDdiF" }, "execution_count": 14, "outputs": [] }, { "cell_type": "code", "source": [ "batch_size = 32\n", "for steps in range(100): # increase number of steps for good results...\n", "\n", " # sample a batch of data\n", " xb, yb = get_batch('train')\n", "\n", " # evaluate the loss\n", " logits, loss = m(xb, yb)\n", " optimizer.zero_grad(set_to_none=True)\n", " loss.backward()\n", " optimizer.step()\n", "\n", "print(loss.item())\n" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "Hs4kI8YdEkQj", "outputId": "31371728-b7fb-48e6-8b52-f00571f8d89f" }, "execution_count": 15, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "4.402019023895264\n" ] } ] }, { "cell_type": "code", "source": [ "print(decode(m.generate(idx = torch.zeros((1, 1), dtype=torch.long), max_new_tokens=500)[0].tolist()))" ], "metadata": { "id": "EcVIDWAZEtjN" }, 
"execution_count": null, "outputs": [] }, { "cell_type": "markdown", "source": [ "### Full finished code, for reference\n", "\n", "You may want to refer directly to the git repo instead though." ], "metadata": { "id": "ZcvKeBXoZFOY" } }, { "cell_type": "code", "source": [ "torch.cuda.is_available()" ], "metadata": { "id": "IJFiK1n_WqLd", "outputId": "f42d7502-df43-4a8d-9905-d64b4048a8fb", "colab": { "base_uri": "https://localhost:8080/" } }, "execution_count": 3, "outputs": [ { "output_type": "execute_result", "data": { "text/plain": [ "True" ] }, "metadata": {}, "execution_count": 3 } ] }, { "cell_type": "code", "source": [ "import torch\n", "import torch.nn as nn\n", "from torch.nn import functional as F\n", "\n", "# hyperparameters\n", "batch_size = 128 # how many independent sequences will we process in parallel?\n", "block_size = 256 # what is the maximum context length for predictions?\n", "max_iters = 5000\n", "eval_interval = 300\n", "learning_rate = 1e-3\n", "device = 'cuda' if torch.cuda.is_available() else 'cpu'\n", "eval_iters = 100\n", "n_embd = 128 # Increase hidden size\n", "n_head = 8 # Adjust number of attention heads\n", "n_layer = 12 # Increase number of layers\n", "\n", "dropout = 0.2\n", "# ------------\n", "\n", "torch.manual_seed(1337)\n", "\n", "\n", "text = text\n", "\n", "# here are all the unique characters that occur in this text\n", "chars = sorted(list(set(text)))\n", "vocab_size = len(chars)\n", "# create a mapping from characters to integers\n", "stoi = { ch:i for i,ch in enumerate(chars) }\n", "itos = { i:ch for i,ch in enumerate(chars) }\n", "encode = lambda s: [stoi[c] for c in s] # encoder: take a string, output a list of integers\n", "decode = lambda l: ''.join([itos[i] for i in l]) # decoder: take a list of integers, output a string\n", "\n", "# Train and test splits\n", "data = torch.tensor(encode(text), dtype=torch.long)\n", "n = int(0.9*len(data)) # first 90% will be train, rest val\n", "train_data = data[:n]\n", "val_data = data[n:]\n", "\n", "# data loading\n", "def get_batch(split):\n", " # generate a small batch of data of inputs x and targets y\n", " data = train_data if split == 'train' else val_data\n", " ix = torch.randint(len(data) - block_size, (batch_size,))\n", " x = torch.stack([data[i:i+block_size] for i in ix])\n", " y = torch.stack([data[i+1:i+block_size+1] for i in ix])\n", " x, y = x.to(device), y.to(device)\n", " return x, y\n", "\n", "@torch.no_grad()\n", "def estimate_loss():\n", " out = {}\n", " model.eval()\n", " for split in ['train', 'val']:\n", " losses = torch.zeros(eval_iters)\n", " for k in range(eval_iters):\n", " X, Y = get_batch(split)\n", " logits, loss = model(X, Y)\n", " losses[k] = loss.item()\n", " out[split] = losses.mean()\n", " model.train()\n", " return out\n", "\n", "class Head(nn.Module):\n", " \"\"\" one head of self-attention \"\"\"\n", "\n", " def __init__(self, head_size):\n", " super().__init__()\n", " self.key = nn.Linear(n_embd, head_size, bias=False)\n", " self.query = nn.Linear(n_embd, head_size, bias=False)\n", " self.value = nn.Linear(n_embd, head_size, bias=False)\n", " self.register_buffer('tril', torch.tril(torch.ones(block_size, block_size)))\n", "\n", " self.dropout = nn.Dropout(dropout)\n", "\n", " def forward(self, x):\n", " B,T,C = x.shape\n", " k = self.key(x) # (B,T,C)\n", " q = self.query(x) # (B,T,C)\n", " # compute attention scores (\"affinities\")\n", " wei = q @ k.transpose(-2,-1) * C**-0.5 # (B, T, C) @ (B, C, T) -> (B, T, T)\n", " wei = wei.masked_fill(self.tril[:T, :T] == 0, 
float('-inf')) # (B, T, T)\n", " wei = F.softmax(wei, dim=-1) # (B, T, T)\n", " wei = self.dropout(wei)\n", " # perform the weighted aggregation of the values\n", " v = self.value(x) # (B,T,hs)\n", " out = wei @ v # (B, T, T) @ (B, T, hs) -> (B, T, hs)\n", " return out\n", "\n", "class MultiHeadAttention(nn.Module):\n", " \"\"\" multiple heads of self-attention in parallel \"\"\"\n", "\n", " def __init__(self, num_heads, head_size):\n", " super().__init__()\n", " self.heads = nn.ModuleList([Head(head_size) for _ in range(num_heads)])\n", " self.proj = nn.Linear(n_embd, n_embd)\n", " self.dropout = nn.Dropout(dropout)\n", "\n", " def forward(self, x):\n", " out = torch.cat([h(x) for h in self.heads], dim=-1)\n", " out = self.dropout(self.proj(out))\n", " return out\n", "\n", "class FeedForward(nn.Module):\n", " \"\"\" a simple linear layer followed by a non-linearity \"\"\"\n", "\n", " def __init__(self, n_embd):\n", " super().__init__()\n", " self.net = nn.Sequential(\n", " nn.Linear(n_embd, 4 * n_embd),\n", " nn.ReLU(),\n", " nn.Linear(4 * n_embd, n_embd),\n", " nn.Dropout(dropout),\n", " )\n", "\n", " def forward(self, x):\n", " return self.net(x)\n", "\n", "class Block(nn.Module):\n", " \"\"\" Transformer block: communication followed by computation \"\"\"\n", "\n", " def __init__(self, n_embd, n_head):\n", " # n_embd: embedding dimension, n_head: the number of heads we'd like\n", " super().__init__()\n", " head_size = n_embd // n_head\n", " self.sa = MultiHeadAttention(n_head, head_size)\n", " self.ffwd = FeedForward(n_embd)\n", " self.ln1 = nn.LayerNorm(n_embd)\n", " self.ln2 = nn.LayerNorm(n_embd)\n", "\n", " def forward(self, x):\n", " x = x + self.sa(self.ln1(x))\n", " x = x + self.ffwd(self.ln2(x))\n", " return x\n", "\n", "# the full transformer language model (the BigramLanguageModel name is kept from earlier)\n", "class BigramLanguageModel(nn.Module):\n", "\n", " def __init__(self):\n", " super().__init__()\n", " # tokens are embedded, combined with position embeddings, and run through the blocks\n", " self.token_embedding_table = nn.Embedding(vocab_size, n_embd)\n", " self.position_embedding_table = nn.Embedding(block_size, n_embd)\n", " self.blocks = nn.Sequential(*[Block(n_embd, n_head=n_head) for _ in range(n_layer)])\n", " self.ln_f = nn.LayerNorm(n_embd) # final layer norm\n", " self.lm_head = nn.Linear(n_embd, vocab_size)\n", "\n", " def forward(self, idx, targets=None):\n", " B, T = idx.shape\n", "\n", " # idx and targets are both (B,T) tensors of integers\n", " tok_emb = self.token_embedding_table(idx) # (B,T,C)\n", " pos_emb = self.position_embedding_table(torch.arange(T, device=device)) # (T,C)\n", " x = tok_emb + pos_emb # (B,T,C)\n", " x = self.blocks(x) # (B,T,C)\n", " x = self.ln_f(x) # (B,T,C)\n", " logits = self.lm_head(x) # (B,T,vocab_size)\n", "\n", " if targets is None:\n", " loss = None\n", " else:\n", " B, T, C = logits.shape\n", " logits = logits.view(B*T, C)\n", " targets = targets.view(B*T)\n", " loss = F.cross_entropy(logits, targets)\n", "\n", " return logits, loss\n", "\n", " def generate(self, idx, max_new_tokens):\n", " # idx is (B, T) array of indices in the current context\n", " for _ in range(max_new_tokens):\n", " # crop idx to the last block_size tokens\n", " idx_cond = idx[:, -block_size:]\n", " # get the predictions\n", " logits, loss = self(idx_cond)\n", " # focus only on the last time step\n", " logits = logits[:, -1, :] # becomes (B, C)\n", " # apply softmax to get probabilities\n", " probs = F.softmax(logits, dim=-1) # (B, C)\n", " # sample from the distribution\n", " idx_next = torch.multinomial(probs, 
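num_samples=1) # (B, 1)\n", " # (added) sampling from the softmax distribution keeps generation varied;\n", " # greedy argmax decoding would get stuck repeating the likeliest characters\n", " # here one token id is drawn per batch row: torch.multinomial(probs, 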
num_samples=1) # (B, 1)\n", " # append sampled index to the running sequence\n", " idx = torch.cat((idx, idx_next), dim=1) # (B, T+1)\n", " return idx\n", "\n", "model = BigramLanguageModel()\n", "m = model.to(device)\n", "# print the number of parameters in the model\n", "print(sum(p.numel() for p in m.parameters())/1e6, 'M parameters')\n", "\n", "# create a PyTorch optimizer\n", "optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)\n", "\n", "for iter in range(max_iters):\n", "\n", " # every once in a while evaluate the loss on train and val sets\n", " if iter % eval_interval == 0 or iter == max_iters - 1:\n", " losses = estimate_loss()\n", " print(f\"step {iter}: train loss {losses['train']:.4f}, val loss {losses['val']:.4f}\")\n", "\n", " # sample a batch of data\n", " xb, yb = get_batch('train')\n", "\n", " # evaluate the loss\n", " logits, loss = model(xb, yb)\n", " optimizer.zero_grad(set_to_none=True)\n", " loss.backward()\n", " optimizer.step()\n", "\n", "# generate from the model\n", "context = torch.zeros((1, 1), dtype=torch.long, device=device)\n", "print(decode(m.generate(context, max_new_tokens=2000)[0].tolist()))\n" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "hoelkOrFY8bN", "outputId": "c01f10ef-048b-41b4-c862-031c7e7281c9" }, "execution_count": 4, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "2.42567 M parameters\n", "step 0: train loss 4.4474, val loss 4.4467\n", "step 300: train loss 1.7789, val loss 1.7773\n", "step 600: train loss 1.4613, val loss 1.4679\n", "step 900: train loss 1.2493, val loss 1.2604\n", "step 1200: train loss 1.1231, val loss 1.1440\n", "step 1500: train loss 1.0568, val loss 1.0844\n", "step 1800: train loss 1.0104, val loss 1.0401\n", "step 2100: train loss 0.9701, val loss 1.0066\n", "step 2400: train loss 0.9385, val loss 0.9754\n", "step 2700: train loss 0.9122, val loss 0.9547\n", "step 3000: train loss 0.8927, val loss 0.9387\n", "step 3300: train loss 0.8747, val loss 0.9226\n", "step 3600: train loss 0.8646, val loss 0.9148\n", "step 3900: train loss 0.8546, val loss 0.9087\n", "step 4200: train loss 0.8414, val loss 0.8990\n", "step 4500: train loss 0.8352, val loss 0.8919\n", "step 4800: train loss 0.8238, val loss 0.8827\n", "step 4999: train loss 0.8193, val loss 0.8796\n", "\t گروهر شده جوشن با يوز رخ سروه‏\n", " همى گور و ديده بيوق و تير همان غلت شاپور و چندى مپير\n", " هم اندر زمان غلعه فرخ اوست همه سال گردنده شد گيو اوست‏\n", " اگر سوگوارست پيكار بيد همى ژعف و خنجر ز سازند بيد\n", " همه جنگ را مشك هست و غم زمين شد ز آهوش استر دژم‏\n", " سپه را سر بابر افراسياب بزد باد و پاى و رعد پذير\n", " يكى جنگ پيلى فرو مايه كرد همه بگذرد اختر اينسان كرد\n", " بدو گفت با دو پى اى داشتست سخن‏گوى و كشور بافراج داست‏\n", " همى جنگ جمّى بمستى زوان بشد گستهم چشم بد نيك روان‏\n", " خداوند پر ما ز گستهم خور بهر معدبان طرز گهر هور\n", " چنان تاخت شاه آمد از چو گنگ جز از غم ديدگان بس اندر درنگ‏\n", " [ و گر زين و از باره آهخت و راه بدين تيغ زن شاه در رزمگاه‏]\n", " سكندر بشمزين يكى رزم زشت خرد شاد بايد استيد گل‏\n", " [ شگاهى تور رستم‏]\n", " [ چو اورنده باشد آورد به‏سال زمين زرد بسيار بينيد خاك‏]\n", " [ چو خورشيد گشت از شمار ديد شده لشكر از ميان كار تيد‏]\n", " يكى كار سودابه بى‏نان وزير چو تنها بدين تا بد شهريس‏\n", " [ بفتراف زادى مدارى پسر كه تا چيز را نيز اسبان در حرن‏]\n", " دو مانديش از كار چونى سپاه سم زاورش از آن بهر كلاه‏\n", " چنين گفت پيران چنين گفت بخت كه با ناموزه شاه هنره تست‏\n", " ميان دو پاكيزه بود نگذرد بكام من بريشان بشست كرد\n", " بزابل چو فرزند تو شوم شاد برتر 
چنين گفت مانى كداد\n", " برستم بايد اكنون گشت زاد دل زخم گردان و خندان براد\n", " ورا من دبيرون تن اندر كنيد نگر تار باشى بپيوند كنيمد\n", " شاهنامه، ص: 87\n", " [ ورا داد پنيروز نوشين روان گر از مردم افگنده پهلوان‏]\n", " [ مرا زانج دانات كردار جست سپه دار گيتى نيابد بشست‏]\n", " [ تن بى‏گمان ميز ايران مراست كه اى نامور بخورش در نعل‏]\n", " [ كسى داده‏يى رزم شب چون درم درف\n" ] } ] }, { "cell_type": "code", "source": [ "# save the trained weights (to the local Colab filesystem for now)\n", "torch.save(model.state_dict(), 'language_model.pth')" ], "metadata": { "id": "T-rD48Xwm5pc" }, "execution_count": 5, "outputs": [] }, { "cell_type": "code", "source": [ "from google.colab import drive\n", "drive.mount('/content/drive')" ], "metadata": { "id": "grP_S0osm6-5", "outputId": "3f478a95-bdfe-45e8-c596-ef9bdf2ce034", "colab": { "base_uri": "https://localhost:8080/" } }, "execution_count": 7, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Mounted at /content/drive\n" ] } ] }, { "cell_type": "code", "source": [ "# generate from the model\n", "context = torch.zeros((1, 1), dtype=torch.long, device=device)\n", "print(decode(m.generate(context, max_new_tokens=2000)[0].tolist()))" ], "metadata": { "id": "p92PG-OEsCvv", "outputId": "4a982c9e-51f3-4576-ae70-3fc51d1ae687", "colab": { "base_uri": "https://localhost:8080/" } }, "execution_count": 11, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "\t چو نزديك سام بلند بسالار تركان بجايش گزند\n", " فرامور بآتش از اندر بپاى توانه روان رهنماى بپاى‏\n", " سراسر يكى مرد زان در گزيد نهان گمان آرد نه نامين كشيد\n", " [ كه بهرام گفتش كه برداشت بجز باژ جز تخت و كشتى براشت‏]\n", " [ كه تا از آن داد نژاد بود بزرگ آور و دل پر از بود]\n", " [ شوم شند پيروز سا شاه ماه همه نامور تخت شاه و سپاه‏]\n", " سر بى‏قباى و نامه برش چو با ماه شد بى‏گناهش اوى‏\n", " پرستندگان گفت كامون شوى برم گفت رسم نجست از زوى اوى‏\n", " همه پاك بايست مهتران همه راى گفته بديدار زيان‏\n", " بفرمود تا مهر قارن نشست پى سر بسر بر بپر مهر دست‏\n", " بدان تا مبادا يكى پهلوان نداريد ما دانش جهان سر و جوان‏\n", " همى سخت شنگل اندر آيد بدرد بازان رزم را برانى دلي]\n", " [ پند آگازان بر گيو نوذر شايستار و ژويه باك‏]\n", " چو خورشيد زفتى هيونى گرفت بلند اندر آن شاه آن زينهارمت‏\n", " بفرمود تا سر بسر هم همه بروبرز و ماه آمدش بمشت‏\n", " بدو گفت كاى شهريار منست كجات كيان از پى نان نيز منست‏\n", " بفرمود تا جشن درنج و تخت تهمتن نشنريد ماهيم و بخت‏\n", " شاهنامه، ص: 31\n", "\n", " مرا نيز جنگ پآن انديشه رفت زره ساله جنگ بى‏غم در گرفت‏\n", " از ان ناپس بهرام بيداد من‏\n", " كه بر دوه باران بديوان رسيد شب تيره گفتار توم شنيد\n", " اگر من ز كسرى مباديم آمدم و ز ان غرم دلاور كرد آمدم‏\n", " ز تركان بيارى برانى زمير بمى پيل بسسيار دو تنگ‏\n", " بگيريد چندى وفر اين برگ كه از بازگشتن ياد سرگ‏شم‏\n", " به مردى كو را بدو دست چو كوه فراوان شنگ اندرون شد دو گروه‏\n", " ز پيروز رخ آفرين كرد دست گرفت اين سخن يافتند ز پست‏\n", " همى خوان تبيرست بر حال ماه همى افسرستاد بايد ز راه‏\n", " درختيست اين راى را هرچ گفت كه برخاست نامه ز انگزيست جفت‏\n", " شنيد ليا مشك و بيداد چهر گمان جنگش برگ\n" ] } ] }
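, { "cell_type": "markdown", "source": [ "The checkpoint above was written to the ephemeral Colab filesystem, and the mounted Drive is never actually used. The added sketch below copies the checkpoint into Drive and shows how to restore it later; the destination path under MyDrive is an assumption, so adjust it to your own layout." ], "metadata": {} }, { "cell_type": "code", "source": [ "import shutil\n", "\n", "# copy the checkpoint into the mounted Drive so it survives the session\n", "# (the MyDrive destination path is a hypothetical example)\n", "shutil.copy('language_model.pth', '/content/drive/MyDrive/language_model.pth')\n", "\n", "# to restore later: rebuild the architecture, then load the saved weights\n", "# model = BigramLanguageModel()\n", "# model.load_state_dict(torch.load('/content/drive/MyDrive/language_model.pth', map_location=device))\n", "# m = model.to(device).eval()\n" ], "metadata": {}, "execution_count": null, "outputs": [] } ] }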