ysn-rfd committed
Commit 3448041 · verified · 1 Parent(s): 48d1c6f

Upload My_own_NN_GPT.py

Files changed (1)
Custom_GPT_NN_ysnrfd/My_own_NN_GPT.py +1001 -0
Custom_GPT_NN_ysnrfd/My_own_NN_GPT.py ADDED
"""
LICENSE:

Copyright 2025 ysnrfd

Timestamp: 2025-08-12

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to use,
copy, modify, and distribute the Software, subject to the following conditions:

1. The copyright notice, this permission notice, and all attribution information
   regarding the original author (ysnrfd) must be preserved in their entirety
   and must not be removed, altered, or obscured in any copies or derivative works.

2. Any modifications or derivative works must be clearly documented in a "CHANGELOG" or
   "NOTICE" file included with the Software. This documentation must include a detailed
   description of the changes made, the date of the modification, and the identity of
   the modifier.

3. The Software is provided "as is", without warranty of any kind, express or implied.
   The author shall not be liable for any damages arising from use of the Software.

4. Any attempt to remove or alter the original attribution or copyright information
   constitutes a violation of this license and may result in legal action.

"""

import math
import numpy as np
import pickle
import os
import time
from typing import List, Tuple, Dict, Any, Optional, Union, Set
import warnings

DEFAULT_DTYPE = np.float32
EPS = 1e-6

def softmax(x: np.ndarray, axis: int = -1, eps: float = EPS) -> np.ndarray:
    x = x - np.max(x, axis=axis, keepdims=True)
    e = np.exp(x)
    return e / (np.sum(e, axis=axis, keepdims=True) + eps)

def gelu(x: np.ndarray) -> np.ndarray:
    return 0.5 * x * (1.0 + np.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * x**3)))

def gelu_exact(x: np.ndarray) -> np.ndarray:
    # math.erf only accepts scalars; vectorize it so this also works on arrays
    return 0.5 * x * (1.0 + np.vectorize(math.erf)(x / np.sqrt(2.0)))

def gelu_grad(x: np.ndarray) -> np.ndarray:
    tanh_term = np.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * x**3))
    sech2 = 1.0 - tanh_term**2
    return 0.5 * (1.0 + tanh_term) + 0.5 * x * sech2 * np.sqrt(2.0 / np.pi) * (1.0 + 3.0 * 0.044715 * x**2)

def rms_norm(x: np.ndarray, weight: np.ndarray, eps: float = EPS) -> np.ndarray:
    rms = np.sqrt(np.mean(x**2, axis=-1, keepdims=True) + eps)
    return weight * (x / rms)
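
# Minimal sanity check for the helpers above (illustrative only, never called by
# the training script): softmax rows should sum to ~1 and the tanh-based GELU
# gradient should roughly agree with a central finite difference.
def _sanity_check_activations(h: float = 1e-4) -> None:
    x = np.linspace(-3.0, 3.0, 7).astype(DEFAULT_DTYPE)
    assert np.allclose(np.sum(softmax(x), axis=-1), 1.0, atol=1e-4)
    fd = (gelu(x + h) - gelu(x - h)) / (2.0 * h)
    assert np.allclose(gelu_grad(x), fd, atol=1e-2)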

class BPETokenizer:
    def __init__(self):
        self.vocab: List[str] = []
        self.w2i: Dict[str, int] = {}
        self.i2w: Dict[int, str] = {}
        self.merges: List[Tuple[str, str]] = []
        self.cache: Dict[str, List[str]] = {}
        self.special_tokens: List[str] = ['<pad>', '<unk>', '<bos>', '<eos>']

    @staticmethod
    def get_pairs(word: Tuple[str, ...]) -> Set[Tuple[str, str]]:
        return set(zip(word, word[1:]))

    @staticmethod
    def bytes_to_unicode() -> Dict[int, str]:
        bs = list(range(ord("!"), ord("~") + 1)) + \
             list(range(ord("¡"), ord("¬") + 1)) + \
             list(range(ord("®"), ord("ÿ") + 1))
        cs = bs[:]
        n = 0
        for b in range(2**8):
            if b not in bs:
                bs.append(b)
                cs.append(2**8 + n)
                n += 1
        cs = [chr(n) for n in cs]
        return dict(zip(bs, cs))

    def preprocess(self, text: str) -> str:
        byte_encoder = self.bytes_to_unicode()
        text_bytes = text.encode("utf-8")
        return "".join([byte_encoder[b] for b in text_bytes])

    def build_from_text(self, texts: List[str], vocab_size: int = 500, min_freq: int = 2):
        preprocessed = [self.preprocess(text) for text in texts]
        char_freq = {}
        for text in preprocessed:
            for char in text:
                char_freq[char] = char_freq.get(char, 0) + 1
        self.vocab = self.special_tokens + sorted(char_freq.keys(), key=lambda x: -char_freq[x])
        self.w2i = {w: i for i, w in enumerate(self.vocab)}
        self.i2w = {i: w for w, i in self.w2i.items()}
        if len(self.vocab) < vocab_size:
            words = []
            for text in preprocessed:
                words.extend([' '.join(text)])
            word_freq = {}
            for word in words:
                word_freq[word] = word_freq.get(word, 0) + 1
            num_merges = vocab_size - len(self.vocab)
            for i in range(num_merges):
                pairs = {}
                for word, freq in word_freq.items():
                    chars = word.split()
                    for j in range(len(chars) - 1):
                        pair = (chars[j], chars[j+1])
                        pairs[pair] = pairs.get(pair, 0) + freq
                if not pairs:
                    break
                best_pair = max(pairs, key=pairs.get)
                new_token = ''.join(best_pair)
                if new_token not in self.w2i:
                    self.vocab.append(new_token)
                    self.w2i[new_token] = len(self.vocab) - 1
                    self.i2w[len(self.vocab) - 1] = new_token
                self.merges.append(best_pair)
                new_word_freq = {}
                for word, freq in word_freq.items():
                    new_word = word.replace(' '.join(best_pair), new_token)
                    new_word_freq[new_word] = freq
                word_freq = new_word_freq

    def encode(self, text: str, max_len: int = None, add_bos: bool = False, add_eos: bool = False) -> np.ndarray:
        text = self.preprocess(text)
        if add_bos:
            text = self.special_tokens[2] + text
        if add_eos:
            text = text + self.special_tokens[3]
        if text in self.cache:
            tokens = self.cache[text]
        else:
            tokens = list(text)
            for pair in self.merges:
                new_tokens = []
                i = 0
                while i < len(tokens):
                    if i < len(tokens) - 1 and tokens[i] == pair[0] and tokens[i+1] == pair[1]:
                        new_tokens.append(pair[0] + pair[1])
                        i += 2
                    else:
                        new_tokens.append(tokens[i])
                        i += 1
                tokens = new_tokens
            self.cache[text] = tokens
        ids = [self.w2i.get(t, self.w2i['<unk>']) for t in tokens]
        if max_len is not None and len(ids) > max_len:
            ids = ids[:max_len]
        if max_len is not None and len(ids) < max_len:
            ids = ids + [self.w2i['<pad>']] * (max_len - len(ids))
        return np.array(ids, dtype=np.int32)

    def decode(self, ids: Union[np.ndarray, List[int]]) -> str:
        tokens = [self.i2w.get(int(i), '<unk>') for i in ids]
        text = ''.join(tokens)
        for token in self.special_tokens:
            text = text.replace(token, '')
        byte_decoder = {v: k for k, v in self.bytes_to_unicode().items()}
        text_bytes = bytearray([byte_decoder[c] for c in text])
        return text_bytes.decode('utf-8', errors='replace')
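
# GPT.evaluate and Trainer.train below call get_batches_from_text, but the
# uploaded file never defines it. The helper below is a minimal sketch of the
# assumed semantics (encode the whole text once, then yield contiguous
# (input, target) windows shifted by one token) so the script can run end to end.
def get_batches_from_text(text: str, seq_len: int, batch_size: int, tokenizer: BPETokenizer):
    ids = tokenizer.encode(text)
    pad_id = tokenizer.w2i['<pad>']
    tokens_per_batch = batch_size * seq_len
    n_batches = max((len(ids) - 1) // tokens_per_batch, 1)
    for b in range(n_batches):
        xs, ys = [], []
        for k in range(batch_size):
            start = b * tokens_per_batch + k * seq_len
            x = ids[start:start + seq_len]
            y = ids[start + 1:start + seq_len + 1]
            # pad short windows so every batch keeps a fixed (batch, seq_len) shape
            x = np.concatenate([x, np.full(seq_len - len(x), pad_id, dtype=np.int32)])
            y = np.concatenate([y, np.full(seq_len - len(y), pad_id, dtype=np.int32)])
            xs.append(x)
            ys.append(y)
        yield np.stack(xs), np.stack(ys)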

class Embedding:
    def __init__(self, vocab_size: int, d_model: int, dtype=DEFAULT_DTYPE):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.dtype = dtype
        scale = 1.0 / np.sqrt(d_model)
        self.W = np.random.normal(0, scale, (vocab_size, d_model)).astype(dtype)
        self.grad_W = np.zeros_like(self.W)

    def forward(self, idx: np.ndarray) -> np.ndarray:
        return self.W[idx]

    def backward(self, idx: np.ndarray, grad: np.ndarray):
        np.add.at(self.grad_W, idx, grad)

class PositionalEmbedding:
    def __init__(self, max_len: int, d_model: int, use_rotary: bool = False, dtype=DEFAULT_DTYPE):
        self.max_len = max_len
        self.d_model = d_model
        self.use_rotary = use_rotary
        self.dtype = dtype
        if not use_rotary:
            self.W = np.zeros((max_len, d_model), dtype=dtype)
            for pos in range(max_len):
                for i in range(0, d_model, 2):
                    self.W[pos, i] = math.sin(pos / (10000 ** (i / d_model)))
                    if i + 1 < d_model:
                        self.W[pos, i + 1] = math.cos(pos / (10000 ** (i / d_model)))
            self.grad_W = np.zeros_like(self.W)
        else:
            self.rotary_freqs = self._create_rotary_frequencies()

    def _create_rotary_frequencies(self) -> np.ndarray:
        inv_freq = 1.0 / (10000 ** (np.arange(0, self.d_model, 2, dtype=self.dtype) / self.d_model))
        return inv_freq

    def apply_rotary_pos_emb(self, x: np.ndarray, seq_dim: int = -2) -> np.ndarray:
        seq_len = x.shape[seq_dim]
        t = np.arange(seq_len, dtype=self.dtype)
        freqs = np.outer(t, self.rotary_freqs)
        cos = np.cos(freqs)
        sin = np.sin(freqs)
        x1 = x[..., 0::2]
        x2 = x[..., 1::2]
        x_rotated1 = x1 * cos - x2 * sin
        x_rotated2 = x1 * sin + x2 * cos
        x_rotated = np.zeros_like(x)
        x_rotated[..., 0::2] = x_rotated1
        x_rotated[..., 1::2] = x_rotated2
        return x_rotated

    def forward(self, seq_len: int) -> np.ndarray:
        if not self.use_rotary:
            return self.W[:seq_len][np.newaxis, :, :]
        return None

    def backward(self, seq_len: int, grad: np.ndarray):
        if not self.use_rotary:
            np.add.at(self.grad_W, np.arange(seq_len), np.sum(grad, axis=0))

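
# Illustrative check for the rotary branch above (not used by the training
# script): rotary position encoding rotates each (even, odd) feature pair, so it
# should leave per-position vector norms unchanged.
def _rotary_norm_check() -> None:
    pe = PositionalEmbedding(max_len=0, d_model=8, use_rotary=True)
    x = np.random.randn(1, 4, 8).astype(DEFAULT_DTYPE)
    x_rot = pe.apply_rotary_pos_emb(x)
    assert np.allclose(np.linalg.norm(x, axis=-1), np.linalg.norm(x_rot, axis=-1), atol=1e-4)
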
class LayerNorm:
    def __init__(self, d_model: int, eps: float = EPS, rms_norm: bool = False, dtype=DEFAULT_DTYPE):
        self.d_model = d_model
        self.eps = eps
        self.rms_norm = rms_norm
        self.dtype = dtype
        if not rms_norm:
            self.gamma = np.ones((1, 1, d_model), dtype=dtype)
            self.beta = np.zeros((1, 1, d_model), dtype=dtype)
            self.grad_gamma = np.zeros_like(self.gamma)
            self.grad_beta = np.zeros_like(self.beta)
        else:
            self.weight = np.ones((1, 1, d_model), dtype=dtype)
            self.grad_weight = np.zeros_like(self.weight)
        self.x = None
        self.mean = None
        self.var = None
        self.rms = None
        self.x_norm = None

    def forward(self, x: np.ndarray) -> np.ndarray:
        self.x = x
        if self.rms_norm:
            self.rms = np.sqrt(np.mean(x**2, axis=-1, keepdims=True) + self.eps)
            self.x_norm = x / self.rms
            return self.weight * self.x_norm
        else:
            self.mean = np.mean(x, axis=-1, keepdims=True)
            self.var = np.var(x, axis=-1, keepdims=True)
            self.x_norm = (x - self.mean) / np.sqrt(self.var + self.eps)
            return self.gamma * self.x_norm + self.beta

    def backward(self, grad: np.ndarray) -> np.ndarray:
        if self.rms_norm:
            grad_x_norm = grad * self.weight
            # RMSNorm input gradient: (g_hat - x_norm * mean(g_hat * x_norm)) / rms
            mean_gx = np.mean(grad_x_norm * self.x_norm, axis=-1, keepdims=True)
            d_x = (grad_x_norm - self.x_norm * mean_gx) / self.rms
            self.grad_weight = np.sum(grad * self.x_norm, axis=(0, 1), keepdims=True)
            return d_x
        else:
            b, s, d = grad.shape
            self.grad_gamma = np.sum(grad * self.x_norm, axis=(0, 1), keepdims=True)
            self.grad_beta = np.sum(grad, axis=(0, 1), keepdims=True)
            dx_norm = grad * self.gamma
            var_eps = self.var + self.eps
            dx = (1. / np.sqrt(var_eps)) * (dx_norm - np.mean(dx_norm, axis=-1, keepdims=True) -
                                            self.x_norm * np.mean(dx_norm * self.x_norm, axis=-1, keepdims=True))
            return dx

class FeedForward:
    def __init__(self, d_model: int, d_ff: int, dropout: float = 0.1, dtype=DEFAULT_DTYPE):
        self.d_model = d_model
        self.d_ff = d_ff
        self.dropout = dropout
        self.dtype = dtype
        scale_in = 1.0 / np.sqrt(d_model)
        scale_out = 1.0 / np.sqrt(d_ff)
        self.W1 = np.random.normal(0, scale_in, (d_model, d_ff)).astype(dtype)
        self.b1 = np.zeros((1, 1, d_ff), dtype=dtype)
        self.W2 = np.random.normal(0, scale_out, (d_ff, d_model)).astype(dtype)
        self.b2 = np.zeros((1, 1, d_model), dtype=dtype)
        self.grad_W1 = np.zeros_like(self.W1)
        self.grad_b1 = np.zeros_like(self.b1)
        self.grad_W2 = np.zeros_like(self.W2)
        self.grad_b2 = np.zeros_like(self.b2)
        self.x = None
        self.hidden = None
        self.hidden_act = None
        self.dropout_mask1 = None
        self.dropout_mask2 = None

    def forward(self, x: np.ndarray, training: bool = True) -> np.ndarray:
        self.x = x
        b, s, d = x.shape
        self.hidden = x @ self.W1 + self.b1
        self.hidden_act = gelu(self.hidden)
        if training and self.dropout > 0:
            self.dropout_mask1 = (np.random.rand(*self.hidden_act.shape) > self.dropout)
            self.hidden_act = self.hidden_act * self.dropout_mask1 / (1 - self.dropout)
        else:
            self.dropout_mask1 = None
        out = self.hidden_act @ self.W2 + self.b2
        if training and self.dropout > 0:
            self.dropout_mask2 = (np.random.rand(*out.shape) > self.dropout)
            out = out * self.dropout_mask2 / (1 - self.dropout)
        else:
            self.dropout_mask2 = None
        return out

    def backward(self, grad: np.ndarray) -> np.ndarray:
        b, s, d = grad.shape
        if self.dropout_mask2 is not None:
            # match the inverted-dropout scaling used in forward
            grad = grad * self.dropout_mask2 / (1 - self.dropout)
        self.grad_W2 = (self.hidden_act.reshape(-1, self.d_ff).T @ grad.reshape(-1, d)).reshape(self.d_ff, d)
        self.grad_b2 = np.sum(grad, axis=(0, 1), keepdims=True)
        dhidden_act = grad @ self.W2.T
        if self.dropout_mask1 is not None:
            dhidden_act = dhidden_act * self.dropout_mask1 / (1 - self.dropout)
        dhidden = dhidden_act * gelu_grad(self.hidden)
        self.grad_W1 = (self.x.reshape(-1, self.d_model).T @ dhidden.reshape(-1, self.d_ff)).reshape(self.d_model, self.d_ff)
        self.grad_b1 = np.sum(dhidden, axis=(0, 1), keepdims=True)
        dx = dhidden @ self.W1.T
        return dx

class MultiHeadSelfAttention:
    def __init__(self, d_model: int, num_heads: int, dropout: float = 0.1, use_rotary: bool = False, dtype=DEFAULT_DTYPE):
        assert d_model % num_heads == 0, "d_model must be divisible by num_heads"
        self.d_model = d_model
        self.num_heads = num_heads
        self.head_dim = d_model // num_heads
        self.dropout = dropout
        self.use_rotary = use_rotary
        self.dtype = dtype
        scale = 1.0 / np.sqrt(d_model)
        self.W_q = np.random.normal(0, scale, (d_model, d_model)).astype(dtype)
        self.W_k = np.random.normal(0, scale, (d_model, d_model)).astype(dtype)
        self.W_v = np.random.normal(0, scale, (d_model, d_model)).astype(dtype)
        self.W_o = np.random.normal(0, scale, (d_model, d_model)).astype(dtype)
        self.grad_W_q = np.zeros_like(self.W_q)
        self.grad_W_k = np.zeros_like(self.W_k)
        self.grad_W_v = np.zeros_like(self.W_v)
        self.grad_W_o = np.zeros_like(self.W_o)
        if use_rotary:
            # rotary embeddings act per head, so the helper is sized to head_dim
            self.rotary = PositionalEmbedding(0, self.head_dim, use_rotary=True, dtype=dtype)
        self.cache = {}
        self.dropout_mask = None

    def split_heads(self, x: np.ndarray) -> np.ndarray:
        b, s, d = x.shape
        x = x.reshape(b, s, self.num_heads, self.head_dim)
        return np.transpose(x, (0, 2, 1, 3))

    def combine_heads(self, x: np.ndarray) -> np.ndarray:
        x = np.transpose(x, (0, 2, 1, 3))
        b, s, h, hd = x.shape
        return x.reshape(b, s, h * hd)

    def causal_mask(self, seq_len: int) -> np.ndarray:
        return np.tril(np.ones((seq_len, seq_len), dtype=bool))

    def apply_rotary_embeddings(self, q: np.ndarray, k: np.ndarray, seq_dim: int = -2) -> Tuple[np.ndarray, np.ndarray]:
        q_rotated = self.rotary.apply_rotary_pos_emb(q, seq_dim=seq_dim)
        k_rotated = self.rotary.apply_rotary_pos_emb(k, seq_dim=seq_dim)
        return q_rotated, k_rotated

    def forward(self, x: np.ndarray, training: bool = True) -> np.ndarray:
        b, s, d = x.shape
        Q = x @ self.W_q
        K = x @ self.W_k
        V = x @ self.W_v
        Qh = self.split_heads(Q)
        Kh = self.split_heads(K)
        Vh = self.split_heads(V)
        if self.use_rotary:
            Qh, Kh = self.apply_rotary_embeddings(Qh, Kh)
        dk = self.head_dim
        scores = Qh @ np.swapaxes(Kh, -1, -2) / np.sqrt(dk)
        mask = self.causal_mask(s)[np.newaxis, np.newaxis, :, :]
        scores = np.where(mask, scores, -np.inf)
        attn = softmax(scores, axis=-1)
        if training and self.dropout > 0:
            self.dropout_mask = (np.random.rand(*attn.shape) > self.dropout)
            attn = attn * self.dropout_mask / (1 - self.dropout)
        else:
            self.dropout_mask = None
        attn_out = attn @ Vh
        out = self.combine_heads(attn_out) @ self.W_o
        self.cache = {
            'x': x, 'Q': Q, 'K': K, 'V': V,
            'Qh': Qh, 'Kh': Kh, 'Vh': Vh,
            'scores': scores, 'attn': attn, 'attn_out': attn_out,
            'mask': mask
        }
        return out

    def backward(self, grad_out: np.ndarray) -> np.ndarray:
        x = self.cache['x']
        Qh = self.cache['Qh']
        Kh = self.cache['Kh']
        Vh = self.cache['Vh']
        attn = self.cache['attn']
        attn_out = self.cache['attn_out']
        mask = self.cache['mask']
        b, s, d = grad_out.shape
        dk = self.head_dim
        if self.dropout_mask is not None:
            attn = attn * self.dropout_mask
        out_concat = self.combine_heads(attn_out)
        self.grad_W_o = out_concat.reshape(-1, d).T @ grad_out.reshape(-1, d)
        d_out_concat = grad_out @ self.W_o.T
        d_attn_out = d_out_concat.reshape(b, s, self.num_heads, self.head_dim)
        d_attn_out = np.transpose(d_attn_out, (0, 2, 1, 3))
        dVh = np.matmul(np.swapaxes(attn, -1, -2), d_attn_out)
        dattn = np.matmul(d_attn_out, np.swapaxes(Vh, -1, -2))
        sft = attn
        sum_d = np.sum(dattn * sft, axis=-1, keepdims=True)
        dscores = sft * (dattn - sum_d)
        dscores = np.where(mask, dscores, 0.0)
        dQh = np.matmul(dscores, Kh) / np.sqrt(dk)
        dKh = np.matmul(np.swapaxes(dscores, -1, -2), Qh) / np.sqrt(dk)
        dQ = np.transpose(dQh, (0, 2, 1, 3)).reshape(b, s, d)
        dK = np.transpose(dKh, (0, 2, 1, 3)).reshape(b, s, d)
        dV = np.transpose(dVh, (0, 2, 1, 3)).reshape(b, s, d)
        self.grad_W_q = x.reshape(-1, d).T @ dQ.reshape(-1, d)
        self.grad_W_k = x.reshape(-1, d).T @ dK.reshape(-1, d)
        self.grad_W_v = x.reshape(-1, d).T @ dV.reshape(-1, d)
        dx_q = dQ @ self.W_q.T
        dx_k = dK @ self.W_k.T
        dx_v = dV @ self.W_v.T
        dx = dx_q + dx_k + dx_v
        return dx

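
# Illustrative shape and causality check for the attention block above (assumed
# toy sizes, not called by the training script): the output keeps the input
# shape, and the cached attention weights are zero above the diagonal because
# of the causal mask.
def _attention_shape_check() -> None:
    mha = MultiHeadSelfAttention(d_model=16, num_heads=4, dropout=0.0)
    x = np.random.randn(2, 5, 16).astype(DEFAULT_DTYPE)
    out = mha.forward(x, training=False)
    assert out.shape == (2, 5, 16)
    upper = ~np.tril(np.ones((5, 5), dtype=bool))
    assert np.all(mha.cache['attn'][..., upper] == 0.0)
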
class DecoderBlock:
    def __init__(self, d_model: int, num_heads: int, d_ff: int, dropout: float = 0.1,
                 layer_scale: bool = False, layer_scale_init: float = 1e-4, use_rotary: bool = False):
        self.mha = MultiHeadSelfAttention(d_model, num_heads, dropout, use_rotary)
        self.ln1 = LayerNorm(d_model, rms_norm=False)
        self.ff = FeedForward(d_model, d_ff, dropout)
        self.ln2 = LayerNorm(d_model, rms_norm=False)
        self.dropout = dropout
        self.layer_scale = layer_scale
        self.layer_scale_init = layer_scale_init
        if layer_scale:
            self.gamma1 = np.ones((1, 1, d_model)) * layer_scale_init
            self.gamma2 = np.ones((1, 1, d_model)) * layer_scale_init

    def forward(self, x: np.ndarray, training: bool = True) -> np.ndarray:
        attn_out = self.mha.forward(x, training)
        if self.layer_scale:
            attn_out = attn_out * self.gamma1
        x = x + attn_out
        x = self.ln1.forward(x)
        ff_out = self.ff.forward(x, training)
        if self.layer_scale:
            ff_out = ff_out * self.gamma2
        x = x + ff_out
        x = self.ln2.forward(x)
        return x

    def backward(self, grad: np.ndarray) -> np.ndarray:
        d_ln2 = self.ln2.backward(grad)
        d_ff = self.ff.backward(d_ln2)
        if self.layer_scale:
            d_ff = d_ff * self.gamma2
        d_res = d_ln2 + d_ff
        d_ln1 = self.ln1.backward(d_res)
        d_mha = self.mha.backward(d_ln1)
        if self.layer_scale:
            d_mha = d_mha * self.gamma1
        dx = d_mha + d_ln1
        return dx

class GPT:
    def __init__(self, vocab_size: int, max_len: int = 512, d_model: int = 768, num_heads: int = 12,
                 d_ff: int = 3072, num_layers: int = 12, dropout: float = 0.1,
                 use_rotary: bool = False, rms_norm: bool = False, layer_scale: bool = False,
                 dtype=DEFAULT_DTYPE):
        self.vocab_size = vocab_size
        self.max_len = max_len
        self.d_model = d_model
        self.dtype = dtype
        self.embed = Embedding(vocab_size, d_model, dtype)
        self.pos_embed = PositionalEmbedding(max_len, d_model, use_rotary, dtype)
        self.layers = [
            DecoderBlock(d_model, num_heads, d_ff, dropout, layer_scale, use_rotary=use_rotary)
            for _ in range(num_layers)
        ]
        self.ln_f = LayerNorm(d_model, rms_norm=rms_norm, dtype=dtype)
        self.dropout = dropout
        self.W_out = np.random.normal(0, 1.0 / np.sqrt(d_model), (d_model, vocab_size)).astype(dtype)
        self.grad_W_out = np.zeros_like(self.W_out)
        self.opt_states = {}
        self.lr = 0.0
        self.beta1 = 0.0
        self.beta2 = 0.0
        self.eps = 0.0
        self.opt_step = 0
        self.training = True

    def parameters(self) -> List[Tuple[str, np.ndarray]]:
        params = []
        params.append(('embed.W', self.embed.W))
        if not self.pos_embed.use_rotary:
            params.append(('pos.W', self.pos_embed.W))
        for i, layer in enumerate(self.layers):
            params.append((f'layer{i}.mha.W_q', layer.mha.W_q))
            params.append((f'layer{i}.mha.W_k', layer.mha.W_k))
            params.append((f'layer{i}.mha.W_v', layer.mha.W_v))
            params.append((f'layer{i}.mha.W_o', layer.mha.W_o))
            params.append((f'layer{i}.ln1.gamma', layer.ln1.gamma))
            params.append((f'layer{i}.ln1.beta', layer.ln1.beta))
            params.append((f'layer{i}.ff.W1', layer.ff.W1))
            params.append((f'layer{i}.ff.b1', layer.ff.b1))
            params.append((f'layer{i}.ff.W2', layer.ff.W2))
            params.append((f'layer{i}.ff.b2', layer.ff.b2))
            params.append((f'layer{i}.ln2.gamma', layer.ln2.gamma))
            params.append((f'layer{i}.ln2.beta', layer.ln2.beta))
            if layer.layer_scale:
                params.append((f'layer{i}.gamma1', layer.gamma1))
                params.append((f'layer{i}.gamma2', layer.gamma2))
        if not self.ln_f.rms_norm:
            params.append(('ln_f.gamma', self.ln_f.gamma))
            params.append(('ln_f.beta', self.ln_f.beta))
        else:
            params.append(('ln_f.weight', self.ln_f.weight))
        params.append(('W_out', self.W_out))
        return params

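    # Minimal helper assumed by the gradient-clipping and optimizer code below:
    # it mirrors parameters() one-to-one and returns the accumulated gradient
    # for each trainable tensor. LayerScale gains never receive gradients in
    # this implementation, so they get zero placeholders.
    def gradients(self) -> List[Tuple[str, np.ndarray]]:
        grads = []
        grads.append(('embed.W', self.embed.grad_W))
        if not self.pos_embed.use_rotary:
            grads.append(('pos.W', self.pos_embed.grad_W))
        for i, layer in enumerate(self.layers):
            grads.append((f'layer{i}.mha.W_q', layer.mha.grad_W_q))
            grads.append((f'layer{i}.mha.W_k', layer.mha.grad_W_k))
            grads.append((f'layer{i}.mha.W_v', layer.mha.grad_W_v))
            grads.append((f'layer{i}.mha.W_o', layer.mha.grad_W_o))
            grads.append((f'layer{i}.ln1.gamma', layer.ln1.grad_gamma))
            grads.append((f'layer{i}.ln1.beta', layer.ln1.grad_beta))
            grads.append((f'layer{i}.ff.W1', layer.ff.grad_W1))
            grads.append((f'layer{i}.ff.b1', layer.ff.grad_b1))
            grads.append((f'layer{i}.ff.W2', layer.ff.grad_W2))
            grads.append((f'layer{i}.ff.b2', layer.ff.grad_b2))
            grads.append((f'layer{i}.ln2.gamma', layer.ln2.grad_gamma))
            grads.append((f'layer{i}.ln2.beta', layer.ln2.grad_beta))
            if layer.layer_scale:
                grads.append((f'layer{i}.gamma1', np.zeros_like(layer.gamma1)))
                grads.append((f'layer{i}.gamma2', np.zeros_like(layer.gamma2)))
        if not self.ln_f.rms_norm:
            grads.append(('ln_f.gamma', self.ln_f.grad_gamma))
            grads.append(('ln_f.beta', self.ln_f.grad_beta))
        else:
            grads.append(('ln_f.weight', self.ln_f.grad_weight))
        grads.append(('W_out', self.grad_W_out))
        return grads
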
    def zero_grads(self):
        self.embed.grad_W.fill(0.0)
        if not self.pos_embed.use_rotary:
            self.pos_embed.grad_W.fill(0.0)
        for layer in self.layers:
            layer.mha.grad_W_q.fill(0.0)
            layer.mha.grad_W_k.fill(0.0)
            layer.mha.grad_W_v.fill(0.0)
            layer.mha.grad_W_o.fill(0.0)
            layer.ln1.grad_gamma.fill(0.0)
            layer.ln1.grad_beta.fill(0.0)
            layer.ff.grad_W1.fill(0.0)
            layer.ff.grad_b1.fill(0.0)
            layer.ff.grad_W2.fill(0.0)
            layer.ff.grad_b2.fill(0.0)
            layer.ln2.grad_gamma.fill(0.0)
            layer.ln2.grad_beta.fill(0.0)
        if not self.ln_f.rms_norm:
            self.ln_f.grad_gamma.fill(0.0)
            self.ln_f.grad_beta.fill(0.0)
        else:
            self.ln_f.grad_weight.fill(0.0)
        self.grad_W_out.fill(0.0)

    def forward(self, idx: np.ndarray, training: bool = True) -> np.ndarray:
        self.training = training
        b, s = idx.shape
        x = self.embed.forward(idx)
        if not self.pos_embed.use_rotary:
            x = x + self.pos_embed.forward(s)
        for layer in self.layers:
            x = layer.forward(x, training)
        x = self.ln_f.forward(x)
        if training and self.dropout > 0:
            dropout_mask = (np.random.rand(*x.shape) > self.dropout)
            x = x * dropout_mask / (1 - self.dropout)
        else:
            dropout_mask = None
        logits = x.reshape(-1, self.d_model) @ self.W_out
        logits = logits.reshape(b, s, -1)
        self._cache = {'x': x, 'idx': idx, 'dropout_mask': dropout_mask}
        return logits

    def loss_and_backward(self, idx_in: np.ndarray, idx_target: np.ndarray,
                          grad_clip: float = 1.0) -> float:
        b, s = idx_in.shape
        logits = self.forward(idx_in, training=True)
        vocab = logits.shape[-1]
        logits_flat = logits.reshape(-1, vocab)
        targets_flat = idx_target.reshape(-1)
        probs = softmax(logits_flat, axis=1)
        log_probs = np.log(np.clip(probs, 1e-12, 1.0))
        loss = -np.mean(log_probs[np.arange(len(targets_flat)), targets_flat])
        grad_logits = probs.copy()
        grad_logits[np.arange(grad_logits.shape[0]), targets_flat] -= 1
        grad_logits = grad_logits.reshape(b, s, vocab) / (b * s)
        x = self._cache['x']
        self.grad_W_out = x.reshape(-1, self.d_model).T @ grad_logits.reshape(-1, vocab)
        dx = grad_logits.reshape(-1, vocab) @ self.W_out.T
        dx = dx.reshape(b, s, self.d_model)
        if self._cache['dropout_mask'] is not None:
            dx = dx * self._cache['dropout_mask'] / (1 - self.dropout)
        d_ln = self.ln_f.backward(dx)
        grad = d_ln
        for layer in reversed(self.layers):
            grad = layer.backward(grad)
        idx = self._cache['idx']
        self.embed.backward(idx, grad)
        if not self.pos_embed.use_rotary:
            self.pos_embed.backward(s, grad)
        if grad_clip > 0:
            # global-norm clipping over the gradients collected by gradients()
            grads = self.gradients()
            total_norm = 0.0
            for _, g in grads:
                total_norm += float(np.sum(g ** 2))
            total_norm = np.sqrt(total_norm)
            clip_coef = min(grad_clip / (total_norm + EPS), 1.0)
            if clip_coef < 1:
                for _, g in grads:
                    g *= clip_coef
        return loss

    def init_optimizer(self, lr: float = 6e-4, betas=(0.9, 0.95), eps=1e-8,
                       weight_decay: float = 0.1, warmup_steps: int = 2000):
        self.lr = lr
        self.beta1 = betas[0]
        self.beta2 = betas[1]
        self.eps = eps
        self.weight_decay = weight_decay
        self.warmup_steps = warmup_steps
        self.opt_step = 0
        self.opt_states = {}
        for name, param in self.parameters():
            self.opt_states[name] = {
                'm': np.zeros_like(param),
                'v': np.zeros_like(param)
            }

    def step_optimizer(self, current_step: Optional[int] = None):
        if current_step is not None:
            self.opt_step = current_step
        self.opt_step += 1
        if self.warmup_steps > 0:
            lr = self.lr * min(self.opt_step ** -0.5, self.opt_step * self.warmup_steps ** -1.5)
        else:
            lr = self.lr

        def update(name: str, param: np.ndarray, grad: np.ndarray):
            if 'W_' in name and self.weight_decay > 0:
                grad = grad + self.weight_decay * param
            state = self.opt_states[name]
            state['m'] = self.beta1 * state['m'] + (1 - self.beta1) * grad
            state['v'] = self.beta2 * state['v'] + (1 - self.beta2) * (grad ** 2)
            m_hat = state['m'] / (1 - self.beta1 ** self.opt_step)
            v_hat = state['v'] / (1 - self.beta2 ** self.opt_step)
            param -= lr * m_hat / (np.sqrt(v_hat) + self.eps)

        # look up each parameter's gradient by name via gradients()
        grads = dict(self.gradients())
        for name, param in self.parameters():
            update(name, param, grads[name])

    def enable_gradient_checkpointing(self):
        warnings.warn("Gradient checkpointing is not implemented in this NumPy version", RuntimeWarning)

    def convert_to_rms_norm(self):
        self.ln_f = LayerNorm(self.d_model, rms_norm=True, dtype=self.dtype)
        for layer in self.layers:
            layer.ln1 = LayerNorm(self.d_model, rms_norm=True, dtype=self.dtype)
            layer.ln2 = LayerNorm(self.d_model, rms_norm=True, dtype=self.dtype)

    def save(self, path: str, include_optimizer: bool = False):
        data = {
            'config': {
                'vocab_size': self.vocab_size,
                'max_len': self.max_len,
                'd_model': self.d_model,
                'num_heads': self.layers[0].mha.num_heads,
                'd_ff': self.layers[0].ff.d_ff,
                'num_layers': len(self.layers),
                'dropout': self.dropout,
                'use_rotary': self.pos_embed.use_rotary,
                'rms_norm': self.ln_f.rms_norm,
                'layer_scale': any(layer.layer_scale for layer in self.layers)
            },
            'embed.W': self.embed.W,
            'pos.W': self.pos_embed.W if not self.pos_embed.use_rotary else None,
            'layers': [],
            'ln_f.gamma': self.ln_f.gamma if not self.ln_f.rms_norm else None,
            'ln_f.beta': self.ln_f.beta if not self.ln_f.rms_norm else None,
            'ln_f.weight': self.ln_f.weight if self.ln_f.rms_norm else None,
            'W_out': self.W_out
        }
        for layer in self.layers:
            layer_data = {
                'mha.W_q': layer.mha.W_q,
                'mha.W_k': layer.mha.W_k,
                'mha.W_v': layer.mha.W_v,
                'mha.W_o': layer.mha.W_o,
                'ff.W1': layer.ff.W1,
                'ff.b1': layer.ff.b1,
                'ff.W2': layer.ff.W2,
                'ff.b2': layer.ff.b2,
                'ln1.gamma': layer.ln1.gamma,
                'ln1.beta': layer.ln1.beta,
                'ln2.gamma': layer.ln2.gamma,
                'ln2.beta': layer.ln2.beta
            }
            if layer.layer_scale:
                layer_data['gamma1'] = layer.gamma1
                layer_data['gamma2'] = layer.gamma2
            data['layers'].append(layer_data)
        if include_optimizer and self.opt_states:
            data['optimizer'] = {
                'lr': self.lr,
                'beta1': self.beta1,
                'beta2': self.beta2,
                'eps': self.eps,
                'weight_decay': self.weight_decay,
                'warmup_steps': self.warmup_steps,
                'opt_step': self.opt_step,
                'states': {k: {'m': v['m'], 'v': v['v']} for k, v in self.opt_states.items()}
            }
        os.makedirs(os.path.dirname(os.path.abspath(path)), exist_ok=True)
        with open(path, 'wb') as f:
            pickle.dump(data, f)

    def load(self, path: str, strict: bool = True):
        with open(path, 'rb') as f:
            data = pickle.load(f)
        self.embed.W = data['embed.W']
        if not self.pos_embed.use_rotary and data['pos.W'] is not None:
            self.pos_embed.W = data['pos.W']
        for layer, ld in zip(self.layers, data['layers']):
            layer.mha.W_q = ld['mha.W_q']
            layer.mha.W_k = ld['mha.W_k']
            layer.mha.W_v = ld['mha.W_v']
            layer.mha.W_o = ld['mha.W_o']
            layer.ff.W1 = ld['ff.W1']
            layer.ff.b1 = ld['ff.b1']
            layer.ff.W2 = ld['ff.W2']
            layer.ff.b2 = ld['ff.b2']
            layer.ln1.gamma = ld['ln1.gamma']
            layer.ln1.beta = ld['ln1.beta']
            layer.ln2.gamma = ld['ln2.gamma']
            layer.ln2.beta = ld['ln2.beta']
            if hasattr(layer, 'gamma1') and 'gamma1' in ld:
                layer.gamma1 = ld['gamma1']
            if hasattr(layer, 'gamma2') and 'gamma2' in ld:
                layer.gamma2 = ld['gamma2']
        if not self.ln_f.rms_norm:
            self.ln_f.gamma = data['ln_f.gamma']
            self.ln_f.beta = data['ln_f.beta']
        else:
            self.ln_f.weight = data['ln_f.weight']
        self.W_out = data['W_out']
        if 'optimizer' in data and self.opt_states:
            opt_data = data['optimizer']
            self.lr = opt_data['lr']
            self.beta1 = opt_data['beta1']
            self.beta2 = opt_data['beta2']
            self.eps = opt_data['eps']
            self.weight_decay = opt_data.get('weight_decay', 0.1)
            self.warmup_steps = opt_data.get('warmup_steps', 2000)
            self.opt_step = opt_data['opt_step']
            for name, state in opt_data['states'].items():
                if name in self.opt_states:
                    self.opt_states[name]['m'] = state['m']
                    self.opt_states[name]['v'] = state['v']

    def generate(self, idx_start: List[int], max_new_tokens: int = 50,
                 temperature: float = 1.0, top_k: Optional[int] = None,
                 top_p: Optional[float] = None, do_sample: bool = True) -> List[int]:
        idx = list(idx_start)
        for _ in range(max_new_tokens):
            input_ids = np.array([idx[-self.max_len:]], dtype=np.int32)
            logits = self.forward(input_ids, training=False)
            next_logits = logits[0, -1] / max(temperature, 1e-8)
            if top_k is not None and top_k > 0:
                top_k = min(top_k, len(next_logits))
                top_k_idx = np.argpartition(next_logits, -top_k)[-top_k:]
                top_k_logits = next_logits[top_k_idx]
                if top_p is not None and top_p < 1.0:
                    sorted_idx = np.argsort(top_k_logits)[::-1]
                    sorted_logits = top_k_logits[sorted_idx]
                    cumulative_probs = np.cumsum(softmax(sorted_logits))
                    cutoff_idx = np.where(cumulative_probs > top_p)[0][0]
                    top_p_idx = top_k_idx[sorted_idx[:cutoff_idx + 1]]
                    top_p_logits = next_logits[top_p_idx]
                    probs = softmax(top_p_logits)
                    # renormalize: the eps in softmax leaves the probabilities
                    # summing to slightly less than 1, which np.random.choice rejects
                    probs = probs / probs.sum()
                    next_id = np.random.choice(top_p_idx, p=probs) if do_sample else top_p_idx[np.argmax(top_p_logits)]
                else:
                    probs = softmax(top_k_logits)
                    probs = probs / probs.sum()
                    next_id = np.random.choice(top_k_idx, p=probs) if do_sample else top_k_idx[np.argmax(top_k_logits)]
            else:
                if top_p is not None and top_p < 1.0:
                    sorted_idx = np.argsort(next_logits)[::-1]
                    sorted_logits = next_logits[sorted_idx]
                    cumulative_probs = np.cumsum(softmax(sorted_logits))
                    cutoff_idx = np.where(cumulative_probs > top_p)[0][0]
                    top_p_idx = sorted_idx[:cutoff_idx + 1]
                    top_p_logits = next_logits[top_p_idx]
                    probs = softmax(top_p_logits)
                    probs = probs / probs.sum()
                    next_id = np.random.choice(top_p_idx, p=probs) if do_sample else top_p_idx[np.argmax(top_p_logits)]
                else:
                    probs = softmax(next_logits)
                    probs = probs / probs.sum()
                    next_id = np.random.choice(len(probs), p=probs) if do_sample else np.argmax(probs)
            idx.append(int(next_id))
        return idx

    def evaluate(self, val_data: np.ndarray, seq_len: int, batch_size: int,
                 tokenizer: Any) -> Tuple[float, float]:
        total_loss = 0.0
        total_tokens = 0
        n_batches = 0
        for xb, yb in get_batches_from_text(val_data, seq_len, batch_size, tokenizer):
            original_dropout = self.dropout
            self.dropout = 0.0
            b, s = xb.shape
            logits = self.forward(xb, training=False)
            vocab = logits.shape[-1]
            logits_flat = logits.reshape(-1, vocab)
            targets_flat = yb.reshape(-1)
            probs = softmax(logits_flat, axis=1)
            log_probs = np.log(np.clip(probs, 1e-12, 1.0))
            loss = -np.mean(log_probs[np.arange(len(targets_flat)), targets_flat])
            total_loss += loss * len(targets_flat)
            total_tokens += len(targets_flat)
            n_batches += 1
            self.dropout = original_dropout
        avg_loss = total_loss / total_tokens
        perplexity = np.exp(avg_loss)
        return avg_loss, perplexity

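
# Minimal end-to-end sketch for the GPT class above (toy sizes, illustrative
# only; the real entry point is the __main__ block at the bottom): build a tiny
# model, take one optimizer step, then sample a few tokens.
def _gpt_smoke_test() -> None:
    model = GPT(vocab_size=32, max_len=16, d_model=32, num_heads=4,
                d_ff=64, num_layers=2, dropout=0.0)
    model.init_optimizer(lr=1e-3, warmup_steps=0)
    x = np.random.randint(0, 32, size=(2, 8)).astype(np.int32)
    y = np.random.randint(0, 32, size=(2, 8)).astype(np.int32)
    loss = model.loss_and_backward(x, y, grad_clip=1.0)
    model.step_optimizer()
    model.zero_grads()
    sample = model.generate([1, 2, 3], max_new_tokens=5, temperature=1.0, top_k=8)
    assert np.isfinite(loss) and len(sample) == 8
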
class Trainer:
    def __init__(self, model: GPT, tokenizer: Any, train_data: str,
                 val_data: Optional[str] = None, seq_len: int = 1024,
                 batch_size: int = 8, grad_accum_steps: int = 1):
        self.model = model
        self.tokenizer = tokenizer
        self.train_data = train_data
        self.val_data = val_data
        self.seq_len = seq_len
        self.batch_size = batch_size
        self.grad_accum_steps = grad_accum_steps
        self.history = {'train_loss': [], 'val_loss': [], 'perplexity': [], 'lr': []}
        self.best_val_loss = float('inf')
        self.patience_counter = 0

    def train(self, epochs: int = 10, lr: float = 3e-4, weight_decay: float = 0.1,
              warmup_steps: int = 2000, grad_clip: float = 1.0,
              val_interval: int = 1, early_stopping_patience: int = 5,
              checkpoint_dir: str = 'checkpoints', save_best: bool = True):
        os.makedirs(checkpoint_dir, exist_ok=True)
        self.model.init_optimizer(
            lr=lr,
            weight_decay=weight_decay,
            warmup_steps=warmup_steps
        )
        total_steps = 0
        start_time = time.time()
        for epoch in range(1, epochs + 1):
            print(f"\nEpoch {epoch}/{epochs}")
            epoch_start = time.time()
            total_loss = 0.0
            n_batches = 0
            total_steps += len(self.train_data) // (self.seq_len * self.batch_size)
            for i, (xb, yb) in enumerate(get_batches_from_text(
                    self.train_data, self.seq_len, self.batch_size, self.tokenizer)):
                loss = self.model.loss_and_backward(xb, yb, grad_clip)
                total_loss += loss
                n_batches += 1
                if (i + 1) % self.grad_accum_steps == 0 or (i + 1) == n_batches:
                    self.model.step_optimizer(total_steps)
                    self.model.zero_grads()
                if i % 10 == 0:
                    # guard against total_steps == 0 on very small corpora
                    step = max(total_steps, 1)
                    current_lr = lr * min(step ** -0.5, step * warmup_steps ** -1.5) if warmup_steps > 0 else lr
                    print(f'Step {i+1}/{n_batches}, Loss: {loss:.4f}, LR: {current_lr:.2e}', end='\r')
            avg_loss = total_loss / max(1, n_batches)
            self.history['train_loss'].append(avg_loss)
            val_loss = float('inf')
            perplexity = float('inf')
            if self.val_data and epoch % val_interval == 0:
                val_loss, perplexity = self.model.evaluate(
                    self.val_data, self.seq_len, self.batch_size, self.tokenizer
                )
                self.history['val_loss'].append(val_loss)
                self.history['perplexity'].append(perplexity)
                if save_best and val_loss < self.best_val_loss:
                    self.best_val_loss = val_loss
                    best_path = os.path.join(checkpoint_dir, 'best_model.pkl')
                    self.model.save(best_path, include_optimizer=True)
                    print(f"\n[INFO] Best model saved with validation loss: {val_loss:.4f}")
                    self.patience_counter = 0
                else:
                    self.patience_counter += 1
            epoch_time = time.time() - epoch_start
            print(f"\nEpoch {epoch} completed in {epoch_time:.2f}s | "
                  f"Train Loss: {avg_loss:.4f} | "
                  f"Val Loss: {val_loss:.4f} | "
                  f"Perplexity: {perplexity:.2f}")
            start_prompt = 'دوست '
            start_ids = [self.tokenizer.w2i.get(c, self.tokenizer.w2i['<unk>']) for c in start_prompt]
            gen = self.model.generate(start_ids, max_new_tokens=100, temperature=0.8, top_k=50, top_p=0.9)
            print('Sample:', self.tokenizer.decode(np.array(gen)))
            if epoch % 5 == 0:
                ckpt_path = os.path.join(checkpoint_dir, f'model_epoch_{epoch}.pkl')
                self.model.save(ckpt_path)
                print(f"[INFO] Checkpoint saved to {ckpt_path}")
            if early_stopping_patience > 0 and self.patience_counter >= early_stopping_patience:
                print(f"\n[INFO] Early stopping triggered after {epoch} epochs")
                break
        total_time = time.time() - start_time
        print(f"\nTraining completed in {total_time/60:.2f} minutes")
        return self.history

if __name__ == '__main__':
    seq_len = 128
    batch_size = 8
    epochs = 50
    lr = 6e-4
    try:
        with open('sample_text.txt', 'r', encoding='utf-8') as f:
            sample_text = f.read()
    except OSError:
        sample_text = """
        دوست دارم برنامه‌نویسی کنم. این یک متن نمونه است برای آموزش مدل GPT کوچک.
        مدل می‌تواند کاراکترها را یاد بگیرد و متن تولید کند.
        هوش مصنوعی یکی از حوزه‌های پررونق در دنیای امروز است.
        مدل‌های زبانی بزرگ قادر به انجام کارهای شگفت‌انگیزی هستند.
        در این مثال ساده، ما یک مدل GPT کوچک را پیاده‌سازی می‌کنیم.
        """
    train_ratio = 0.9
    split_idx = int(len(sample_text) * train_ratio)
    train_text = sample_text[:split_idx]
    val_text = sample_text[split_idx:]
    print("Building tokenizer...")
    tok = BPETokenizer()
    tok.build_from_text([train_text], vocab_size=500)
    vocab_size = len(tok.vocab)
    print(f'Vocabulary size: {vocab_size}')
    print("Building model...")
    model = GPT(
        vocab_size=vocab_size,
        max_len=seq_len,
        d_model=256,
        num_heads=8,
        d_ff=1024,
        num_layers=6,
        dropout=0.1,
        use_rotary=False,
        rms_norm=True,
        layer_scale=True
    )
    print("\nStarting training...")
    trainer = Trainer(
        model=model,
        tokenizer=tok,
        train_data=train_text,
        val_data=val_text,
        seq_len=seq_len,
        batch_size=batch_size
    )
    history = trainer.train(
        epochs=epochs,
        lr=lr,
        weight_decay=0.1,
        warmup_steps=1000,
        grad_clip=1.0,
        val_interval=1,
        early_stopping_patience=10,
        checkpoint_dir='checkpoints'
    )
    model.save('gpt_final.pkl')
    print('Final model saved -> gpt_final.pkl')


"""
LICENSE:

Copyright 2025 ysnrfd

Timestamp: 2025-08-12

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to use,
copy, modify, and distribute the Software, subject to the following conditions:

1. The copyright notice, this permission notice, and all attribution information
   regarding the original author (ysnrfd) must be preserved in their entirety
   and must not be removed, altered, or obscured in any copies or derivative works.

2. Any modifications or derivative works must be clearly documented in a "CHANGELOG" or
   "NOTICE" file included with the Software. This documentation must include a detailed
   description of the changes made, the date of the modification, and the identity of
   the modifier.

3. The Software is provided "as is", without warranty of any kind, express or implied.
   The author shall not be liable for any damages arising from use of the Software.

4. Any attempt to remove or alter the original attribution or copyright information
   constitutes a violation of this license and may result in legal action.

"""