# -*- coding: utf-8 -*-
"""2_S19_cpu.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1laKI3fOJgsqMey3iQ5u3r8mhekJg-inM

## Building a GPT

Companion notebook to the [Zero To Hero](https://karpathy.ai/zero-to-hero.html) video on GPT.
"""
import torch
import torch.nn as nn
from torch.nn import functional as F

def GPT(txt, max_new_tokens=100):

  # hyperparameters
  batch_size = 16 # how many independent sequences will we process in parallel?
  block_size = 32 # what is the maximum context length for predictions?
  max_iters = 5000
  eval_interval = 100
  learning_rate = 1e-3
  device = 'cuda' if torch.cuda.is_available() else 'cpu'
  eval_iters = 200
  n_embd = 64
  n_head = 4
  n_layer = 4
  dropout = 0.0
  # ------------
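  # NOTE: batch_size, max_iters, eval_interval and eval_iters are training-time settings
  # carried over from the original script; for the inference done in this function only
  # block_size, n_embd, n_head, n_layer, dropout and device actually matter.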

  torch.manual_seed(1337)

  with open('input.txt', 'r', encoding='utf-8') as f:
      text = f.read()
  # here are all the unique characters that occur in this text
  chars = sorted(list(set(text)))
  vocab_size = len(chars)
  # create a mapping from characters to integers
  stoi = { ch:i for i,ch in enumerate(chars) }
  itos = { i:ch for i,ch in enumerate(chars) }
  encode = lambda s: [stoi[c] for c in s] # encoder: take a string, output a list of integers
  decode = lambda l: ''.join([itos[i] for i in l]) # decoder: take a list of integers, output a string
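  # Round-trip example (hypothetical vocabulary; the real one depends on input.txt):
  # if chars == ['\n', ' ', 'a', 'b'], then encode('ab') == [2, 3] and
  # decode([2, 3]) == 'ab'.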

  class Head(nn.Module):
      """ one head of self-attention """

      def __init__(self, head_size):
          super().__init__()
          self.key = nn.Linear(n_embd, head_size, bias=False)
          self.query = nn.Linear(n_embd, head_size, bias=False)
          self.value = nn.Linear(n_embd, head_size, bias=False)
          self.register_buffer('tril', torch.tril(torch.ones(block_size, block_size)))

          self.dropout = nn.Dropout(dropout)

      def forward(self, x):
          B,T,C = x.shape
          k = self.key(x)   # (B, T, head_size)
          q = self.query(x) # (B, T, head_size)
          # compute attention scores ("affinities")
          # note: the scale factor uses C (= n_embd) rather than head_size; it is left
          # as-is so the forward pass matches what the pretrained weights presumably saw
          wei = q @ k.transpose(-2,-1) * C**-0.5 # (B, T, head_size) @ (B, head_size, T) -> (B, T, T)
          wei = wei.masked_fill(self.tril[:T, :T] == 0, float('-inf')) # (B, T, T)
          wei = F.softmax(wei, dim=-1) # (B, T, T)
          wei = self.dropout(wei)
          # perform the weighted aggregation of the values
          v = self.value(x) # (B, T, head_size)
          out = wei @ v # (B, T, T) @ (B, T, head_size) -> (B, T, head_size)
          return out
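
  # In formula form, each head above computes
  #   out = softmax( mask( q @ k^T * C**-0.5 ) ) @ v
  # where mask(.) sets entries above the diagonal to -inf, so position t can only
  # attend to positions <= t (causal self-attention); dropout is applied to the weights.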

  class MultiHeadAttention(nn.Module):
      """ multiple heads of self-attention in parallel """

      def __init__(self, num_heads, head_size):
          super().__init__()
          self.heads = nn.ModuleList([Head(head_size) for _ in range(num_heads)])
          self.proj = nn.Linear(n_embd, n_embd)
          self.dropout = nn.Dropout(dropout)

      def forward(self, x):
          out = torch.cat([h(x) for h in self.heads], dim=-1)
          out = self.dropout(self.proj(out))
          return out
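
  # With n_head = 4 and head_size = n_embd // n_head = 16, the concatenation of the four
  # (B, T, 16) head outputs is (B, T, 64) = (B, T, n_embd), which self.proj then mixes
  # back into the residual stream without changing the width.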

  class FeedForward(nn.Module):
      """ a position-wise MLP: linear expansion, ReLU, and a linear projection back """

      def __init__(self, n_embd):
          super().__init__()
          self.net = nn.Sequential(
              nn.Linear(n_embd, 4 * n_embd),
              nn.ReLU(),
              nn.Linear(4 * n_embd, n_embd),
              nn.Dropout(dropout),
          )

      def forward(self, x):
          return self.net(x)

  class Block(nn.Module):
      """ Transformer block: communication followed by computation """

      def __init__(self, n_embd, n_head):
          # n_embd: embedding dimension, n_head: the number of heads we'd like
          super().__init__()
          head_size = n_embd // n_head
          self.sa = MultiHeadAttention(n_head, head_size)
          self.ffwd = FeedForward(n_embd)
          self.ln1 = nn.LayerNorm(n_embd)
          self.ln2 = nn.LayerNorm(n_embd)

      def forward(self, x):
          x = x + self.sa(self.ln1(x))
          x = x + self.ffwd(self.ln2(x))
          return x
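
  # Note the pre-norm residual form above: LayerNorm is applied to the input of each
  # sub-layer (attention, then feed-forward) and the sub-layer output is added back to x,
  # so the residual path stays an identity mapping.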

  # transformer language model (the class keeps its original BigramLanguageModel name
  # from the lecture, but it is no longer a plain bigram model)
  class BigramLanguageModel(nn.Module):

      def __init__(self):
          super().__init__()
          # each token directly reads off the logits for the next token from a lookup table
          self.token_embedding_table = nn.Embedding(vocab_size, n_embd)
          self.position_embedding_table = nn.Embedding(block_size, n_embd)
          self.blocks = nn.Sequential(*[Block(n_embd, n_head=n_head) for _ in range(n_layer)])
          self.ln_f = nn.LayerNorm(n_embd) # final layer norm
          self.lm_head = nn.Linear(n_embd, vocab_size)

      def forward(self, idx, targets=None):
          B, T = idx.shape

          # idx and targets are both (B,T) tensor of integers
          tok_emb = self.token_embedding_table(idx) # (B,T,C)
          pos_emb = self.position_embedding_table(torch.arange(T, device=device)) # (T,C)
          x = tok_emb + pos_emb # (B,T,C)
          x = self.blocks(x) # (B,T,C)
          x = self.ln_f(x) # (B,T,C)
          logits = self.lm_head(x) # (B,T,vocab_size)

          if targets is None:
              loss = None
          else:
              B, T, C = logits.shape
              logits = logits.view(B*T, C)
              targets = targets.view(B*T)
              loss = F.cross_entropy(logits, targets)

          return logits, loss

      def generate(self, idx, max_new_tokens):
          # idx is (B, T) array of indices in the current context
          for _ in range(max_new_tokens):
              # crop idx to the last block_size tokens (the position embedding table
              # only has block_size entries, so longer contexts cannot be embedded)
              idx_cond = idx[:, -block_size:]
              # get the predictions
              logits, loss = self(idx_cond)
              # focus only on the last time step
              logits = logits[:, -1, :] # becomes (B, C)
              # apply softmax to get probabilities
              probs = F.softmax(logits, dim=-1) # (B, C)
              # sample from the distribution
              idx_next = torch.multinomial(probs, num_samples=1) # (B, 1)
              # append sampled index to the running sequence
              idx = torch.cat((idx, idx_next), dim=1) # (B, T+1)
          return idx

  model = BigramLanguageModel()
  m = model.to(device)
  # print the number of parameters in the model
  print(sum(p.numel() for p in m.parameters())/1e6, 'M parameters')

  # create a PyTorch optimizer (kept from the training script; never stepped here)
  optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)


  # load the pretrained weights and generate a continuation of the prompt
  m.load_state_dict(torch.load('wt_25k.pth', map_location=torch.device(device)))
  m.eval()  # switch to eval mode (dropout is 0.0 here, but this is good practice)
  context = torch.tensor(encode(txt), dtype=torch.long, device=device)  # keep the prompt on the model's device
  context = context.reshape(1, -1)  # (1, T): a batch containing the single prompt sequence
  with torch.no_grad():  # no gradients are needed for sampling
      ret_val = decode(m.generate(context, max_new_tokens=max_new_tokens)[0].tolist())
  return ret_val
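

# Example usage -- a minimal sketch. It assumes 'input.txt' (the corpus that defines the
# character vocabulary) and the pretrained checkpoint 'wt_25k.pth' are in the working
# directory, and that every character in the prompt occurs in input.txt (characters
# outside the vocabulary would raise a KeyError inside encode).
if __name__ == '__main__':
    prompt = "ROMEO: "  # hypothetical prompt; any string drawn from the corpus vocabulary works
    sample = GPT(prompt, max_new_tokens=200)
    print(sample)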