kalekarnn committed
Commit f855bd6 · verified · 1 Parent(s): 4718c79

Update app.py

Files changed (1)
  1. app.py +115 -1
app.py CHANGED
@@ -1,7 +1,121 @@
  import streamlit as st
  import torch
  import tiktoken
- from transformer import GPT, GPTConfig  # Ensure you import your model class
+ from dataclasses import dataclass
+
+ @dataclass
+ class GPTConfig:
+     block_size: int = 1024   # max sequence length
+     vocab_size: int = 50257  # number of tokens: 50,000 BPE merges + 256 byte tokens + 1 <|endoftext|> token
+     n_layer: int = 12        # number of layers
+     n_head: int = 12         # number of heads
+     n_embd: int = 768        # embedding dimension
+
+
+ class GPT(nn.Module):
+
+     def __init__(self, config):
+         super().__init__()
+         self.config = config
+
+         self.transformer = nn.ModuleDict(dict(
+             wte = nn.Embedding(config.vocab_size, config.n_embd),
+             wpe = nn.Embedding(config.block_size, config.n_embd),
+             h = nn.ModuleList([Block(config) for _ in range(config.n_layer)]),
+             ln_f = nn.LayerNorm(config.n_embd),
+         ))
+         self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
+
+         # weight sharing
+         self.transformer.wte.weight = self.lm_head.weight
+
+         # weight initialization
+         self.apply(self._init_weights)
+
+     def _init_weights(self, module):
+         if isinstance(module, nn.Linear):
+             std = 0.02
+             if hasattr(module, 'NANGPT_SCALE_INIT'):
+                 std *= (2 * self.config.n_layer) ** -0.5
+             torch.nn.init.normal_(module.weight, mean=0.0, std=std)
+             if module.bias is not None:
+                 torch.nn.init.zeros_(module.bias)
+         elif isinstance(module, nn.Embedding):
+             torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
+
+     def print_num_parameters(self):
+         num_params = sum(p.numel() for p in self.parameters())
+         print(f"Number of model parameters: {num_params}")
+
+     def forward(self, idx, targets=None):
+         # idx is of shape (B, T)
+         B, T = idx.size()
+         assert T <= self.config.block_size, f"Cannot forward sequence of length {T}, block size is only {self.config.block_size}"
+         # forward the token and position embeddings
+         pos = torch.arange(0, T, dtype=torch.long, device=idx.device)  # shape (T)
+         pos_emb = self.transformer.wpe(pos)  # position embeddings of shape (T, n_embd)
+         tok_emb = self.transformer.wte(idx)  # token embeddings of shape (B, T, n_embd)
+         x = tok_emb + pos_emb
+         # forward the blocks of the transformer
+         for block in self.transformer.h:
+             x = block(x)
+         # forward the final layernorm and the classifier
+         x = self.transformer.ln_f(x)
+         logits = self.lm_head(x)  # (B, T, vocab_size)
+         loss = None
+         if targets is not None:
+             loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
+         return logits, loss
+
+     @classmethod
+     def from_pretrained(cls, model_type):
+         """Loads pretrained GPT-2 model weights from huggingface"""
+         assert model_type in {'gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'}
+         from transformers import GPT2LMHeadModel
+         print("loading weights from pretrained gpt: %s" % model_type)
+
+         # n_layer, n_head and n_embd are determined from model_type
+         config_args = {
+             'gpt2':        dict(n_layer=12, n_head=12, n_embd=768),   # 124M params
+             'gpt2-medium': dict(n_layer=24, n_head=16, n_embd=1024),  # 350M params
+             'gpt2-large':  dict(n_layer=36, n_head=20, n_embd=1280),  # 774M params
+             'gpt2-xl':     dict(n_layer=48, n_head=25, n_embd=1600),  # 1558M params
+         }[model_type]
+         config_args['vocab_size'] = 50257  # always 50257 for GPT model checkpoints
+         config_args['block_size'] = 1024   # always 1024 for GPT model checkpoints
+         # create a from-scratch initialized minGPT model
+         config = GPTConfig(**config_args)
+         model = GPT(config)
+         sd = model.state_dict()
+         sd_keys = sd.keys()
+         sd_keys = [k for k in sd_keys if not k.endswith('.attn.bias')]  # discard this mask / buffer, not a param
+
+         # init a huggingface/transformers model
+         model_hf = GPT2LMHeadModel.from_pretrained(model_type)
+         sd_hf = model_hf.state_dict()
+
+         # copy while ensuring all of the parameters are aligned and match in names and shapes
+         sd_keys_hf = sd_hf.keys()
+         sd_keys_hf = [k for k in sd_keys_hf if not k.endswith('.attn.masked_bias')]  # ignore these, just a buffer
+         sd_keys_hf = [k for k in sd_keys_hf if not k.endswith('.attn.bias')]  # same, just the mask (buffer)
+         transposed = ['attn.c_attn.weight', 'attn.c_proj.weight', 'mlp.c_fc.weight', 'mlp.c_proj.weight']
+         # basically the openai checkpoints use a "Conv1D" module, but we only want to use a vanilla Linear
+         # this means that we have to transpose these weights when we import them
+         assert len(sd_keys_hf) == len(sd_keys), f"mismatched keys: {len(sd_keys_hf)} != {len(sd_keys)}"
+         for k in sd_keys_hf:
+             if any(k.endswith(w) for w in transposed):
+                 # special treatment for the Conv1D weights we need to transpose
+                 assert sd_hf[k].shape[::-1] == sd[k].shape
+                 with torch.no_grad():
+                     sd[k].copy_(sd_hf[k].t())
+             else:
+                 # vanilla copy over the other parameters
+                 assert sd_hf[k].shape == sd[k].shape
+                 with torch.no_grad():
+                     sd[k].copy_(sd_hf[k])
+
+         return model
+
  
  # Load the trained model
  @st.cache_resource
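
Note on the inlined model code: the hunk above references nn, F, and Block, but the only new import in this commit is from dataclasses import dataclass, so app.py still needs import torch.nn as nn, import torch.nn.functional as F, and a transformer Block definition before the GPT class can even be constructed. The sketch below shows one minimal way those missing pieces could look, following the standard GPT-2 / nanoGPT layout (pre-LayerNorm block, causal self-attention, 4x GELU MLP). The CausalSelfAttention and MLP classes here are assumptions, not part of the commit; they are named so their parameters line up with the Hugging Face GPT-2 state-dict keys that from_pretrained copies.

# Minimal sketch of the pieces the hunk assumes but does not define.
# Everything below is illustrative, not the author's committed code.
import torch
import torch.nn as nn
import torch.nn.functional as F

class CausalSelfAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        assert config.n_embd % config.n_head == 0
        self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd)  # q, k, v for all heads in one matmul
        self.c_proj = nn.Linear(config.n_embd, config.n_embd)      # output projection
        self.c_proj.NANGPT_SCALE_INIT = 1  # flag read by GPT._init_weights in the hunk above
        self.n_head = config.n_head
        self.n_embd = config.n_embd

    def forward(self, x):
        B, T, C = x.size()
        q, k, v = self.c_attn(x).split(self.n_embd, dim=2)
        q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)  # (B, n_head, T, head_dim)
        k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
        v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
        # causal (lower-triangular) attention via PyTorch's fused kernel
        y = F.scaled_dot_product_attention(q, k, v, is_causal=True)
        y = y.transpose(1, 2).contiguous().view(B, T, C)
        return self.c_proj(y)

class MLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd)
        self.gelu = nn.GELU(approximate='tanh')
        self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd)
        self.c_proj.NANGPT_SCALE_INIT = 1

    def forward(self, x):
        return self.c_proj(self.gelu(self.c_fc(x)))

class Block(nn.Module):
    # pre-LayerNorm transformer block: x + attn(ln_1(x)), then x + mlp(ln_2(x))
    def __init__(self, config):
        super().__init__()
        self.ln_1 = nn.LayerNorm(config.n_embd)
        self.attn = CausalSelfAttention(config)
        self.ln_2 = nn.LayerNorm(config.n_embd)
        self.mlp = MLP(config)

    def forward(self, x):
        x = x + self.attn(self.ln_1(x))
        x = x + self.mlp(self.ln_2(x))
        return x

The attention sketch uses F.scaled_dot_product_attention with is_causal=True for brevity; the original may instead register an explicit lower-triangular mask buffer, which is what the .attn.bias filtering in from_pretrained hints at. With definitions like these in place, the @st.cache_resource loader that follows the hunk (not shown here) could build the model from a local checkpoint or via GPT.from_pretrained('gpt2'), tokenize user input with tiktoken.get_encoding('gpt2'), and sample from the returned logits.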