# Code takes heavy inspiration from Andrej Karpathy's two implementations:
# nanochat: https://github.com/karpathy/nanochat/blob/master/nanochat/gpt.py
# "Let's build GPT" video: https://github.com/karpathy/ng-video-lecture/blob/master/gpt.py
import os
import sys
import time
import torch
import torch.nn as nn
from torch.nn import functional as F
# hyperparameters
batch_size = 64 # how many independent sequences will we process in parallel?
block_size = 256 # what is the maximum context length for predictions?
max_iters = 5000
eval_interval = 500
learning_rate = 3e-4
device = (
    "cuda"
    if torch.cuda.is_available()
    else ("mps" if torch.backends.mps.is_available() else "cpu")
)
eval_iters = 200
n_embd = 384
n_head = 6
n_layer = 6
head_dim = n_embd // n_head
# ------------
torch.manual_seed(1337)
# Load data
with open("data.txt", "r", encoding="utf-8") as f:
    text = f.read()
# All the unique characters that occur in this text
chars = sorted(list(set(text)))
vocab_size = len(chars)
# Create a mapping from characters to integers
stoi = {ch: i for i, ch in enumerate(chars)}
itos = {i: ch for i, ch in enumerate(chars)}
# encoder: take a string, output a list of integers
def encode(s):
    return [stoi[ch] for ch in s]


# decoder: take a list of integers, output a string
def decode(l):
    return "".join([itos[n] for n in l])
# Train and test splits
data = torch.tensor(encode(text), dtype=torch.long)
n = int(0.9 * len(data)) # first 90% will be train, rest val
train_data = data[:n]
val_data = data[n:]
# data loading
def get_batch(split):
    # generate a small batch of data of inputs x and targets y
    data = train_data if split == "train" else val_data
    idx = torch.randint(len(data) - block_size, (batch_size,))
    x = torch.stack([data[i : i + block_size] for i in idx])
    y = torch.stack([data[i + 1 : i + block_size + 1] for i in idx])
    x, y = x.to(device), y.to(device)
    return x, y

def norm(x):
    # Purely functional rmsnorm with no learnable params
    return F.rms_norm(x, (x.size(-1),))
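
# For reference, F.rms_norm normalizes over the last dimension:
# roughly x / sqrt(mean(x ** 2) + eps). Unlike LayerNorm there is no mean subtraction,
# and in this purely functional form there is no learnable gain or bias either.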

def apply_rotary_emb(x, cos, sin):
    assert x.ndim == 4  # multihead attention
    d = x.shape[3] // 2
    x1, x2 = x[..., :d], x[..., d:]  # split the last dim into two halves
    y1 = x1 * cos + x2 * sin  # rotate pairs of dims
    y2 = x1 * (-sin) + x2 * cos
    out = torch.cat([y1, y2], 3)  # re-assemble
    out = out.to(x.dtype)  # ensure input/output dtypes match
    return out
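
# For reference, this is the usual RoPE trick: each channel pair (x1[i], x2[i]) is rotated
# by an angle proportional to the token position, so after rotation the query-key dot
# product depends on the relative offset between positions rather than on their absolute
# values.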

class MultiHeadAttention(nn.Module):
    def __init__(self):
        super().__init__()
        self.c_q = nn.Linear(n_embd, n_embd, bias=False)
        self.c_k = nn.Linear(n_embd, n_embd, bias=False)
        self.c_v = nn.Linear(n_embd, n_embd, bias=False)
        self.c_proj = nn.Linear(n_embd, n_embd, bias=False)

    def forward(self, x, cos_sin):
        B, T, C = x.size()
        # Project the input to get queries, keys, and values
        q = self.c_q(x).view(B, T, n_head, head_dim)
        k = self.c_k(x).view(B, T, n_head, head_dim)
        v = self.c_v(x).view(B, T, n_head, head_dim)
        # Apply rotary embeddings to queries and keys to get relative positional encoding
        cos, sin = cos_sin
        q, k = apply_rotary_emb(q, cos, sin), apply_rotary_emb(k, cos, sin)
        q, k = norm(q), norm(k)  # QK norm
        q, k, v = (
            q.transpose(1, 2),
            k.transpose(1, 2),
            v.transpose(1, 2),
        )  # (B, T, H, D) -> (B, H, T, D)
        # Self-attention
        y = F.scaled_dot_product_attention(q, k, v, is_causal=True)
        # Re-assemble the heads and project back
        y = y.transpose(1, 2).contiguous().view(B, T, -1)
        y = self.c_proj(y)
        return y

class MLP(nn.Module):
    def __init__(self):
        super().__init__()
        self.c_fc = nn.Linear(n_embd, 4 * n_embd, bias=False)
        self.c_proj = nn.Linear(4 * n_embd, n_embd, bias=False)

    def forward(self, x):
        x = self.c_fc(x)
        x = F.relu(x).square()
        x = self.c_proj(x)
        return x
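
# Note: relu(x) ** 2 is the squared-ReLU activation (as in nanochat): like ReLU it zeroes
# negatives, but both the output and its derivative are continuous at 0, and it tends to
# serve as a simple drop-in alternative to GELU in small transformers.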

class Block(nn.Module):
    def __init__(self):
        super().__init__()
        self.attn = MultiHeadAttention()
        self.mlp = MLP()

    def forward(self, x, cos_sin):
        x = x + self.attn(norm(x), cos_sin)
        x = x + self.mlp(norm(x))
        return x

class Model(nn.Module):
    def __init__(self):
        super().__init__()
        # Token embeddings
        self.token_emb = nn.Embedding(vocab_size, n_embd)
        # Rotary embeddings
        self.rotary_seq_len = block_size * 2
        cos, sin = self._precompute_rotary_embeddings(self.rotary_seq_len)
        self.register_buffer("cos", cos, persistent=False)
        self.register_buffer("sin", sin, persistent=False)
        # Transformer blocks
        self.blocks = nn.ModuleList([Block() for _ in range(n_layer)])
        # Output head to predict next token
        self.lm_head = nn.Linear(n_embd, vocab_size, bias=False)
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Linear):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)

    def _precompute_rotary_embeddings(self, seq_len, base=10000, device=None):
        if device is None:
            device = self.token_emb.weight.device
        channel_range = torch.arange(0, head_dim, 2, dtype=torch.float32, device=device)
        inv_freq = 1.0 / (base ** (channel_range / head_dim))
        t = torch.arange(seq_len, dtype=torch.float32, device=device)
        freqs = torch.outer(t, inv_freq)
        cos, sin = freqs.cos(), freqs.sin()
        cos, sin = (
            cos[None, :, None, :],
            sin[None, :, None, :],
        )  # add batch and head dims
        return cos, sin
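
    # For reference, inv_freq above follows the standard RoPE schedule,
    # theta_j = base ** (-2j / head_dim) for the j-th channel pair: early pairs rotate
    # quickly (short wavelengths, fine positional detail) while later pairs rotate slowly,
    # with the longest wavelength on the order of 2 * pi * base positions.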

    def forward(self, idx, targets=None):
        B, T = idx.size()
        # Get embeddings
        x = self.token_emb(idx)  # (B, T, n_embd)
        x = norm(x)
        # Get rotary embeddings
        assert T <= self.cos.size(1)
        cos_sin = (self.cos[:, :T], self.sin[:, :T])
        # Forward through transformer blocks
        for block in self.blocks:
            x = block(x, cos_sin)
        x = norm(x)
        # Predict next token
        logits = self.lm_head(x)  # (B, T, vocab_size)
        if targets is None:
            loss = None
        else:
            B, T, C = logits.shape
            logits_flat = logits.view(B * T, C)
            targets_flat = targets.view(B * T)
            loss = F.cross_entropy(logits_flat, targets_flat)
        return logits, loss

@torch.no_grad()
def generate(model, max_new_tokens, prompt_len=16, temp=1.0):
    # Start with the first prompt_len tokens from the data as context
    x = data[:prompt_len].unsqueeze(0).to(device)  # (1, prompt_len)
    # x is a (B, T) array of indices in the current context
    for _ in range(max_new_tokens):
        # get the predictions on the current context (cropped to the last block_size tokens)
        cur_context = x[:, -block_size:]
        logits, _ = model(cur_context)
        # focus on the logits for the last position
        logits = logits[:, -1, :]  # becomes (B, C)
        # sample from the distribution (greedy argmax when temp == 0, to avoid dividing by zero)
        if temp == 0:
            next_token = torch.argmax(logits, dim=-1, keepdim=True)  # (B, 1)
        else:
            probs = F.softmax(logits / temp, dim=-1)  # (B, C)
            next_token = torch.multinomial(probs, num_samples=1)  # (B, 1)
        # append the sampled token to the current context
        x = torch.cat((x, next_token), dim=1)  # (B, T+1)
    return decode(x[0].tolist())
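
# Note on sampling: temp < 1 sharpens the softmax distribution (more confident, more
# repetitive text) while temp > 1 flattens it (more diverse, noisier text); temp == 0 falls
# back to greedy decoding. The context is cropped to the last block_size tokens to match
# the context length used during training.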

@torch.no_grad()
def estimate_loss():
    out = {}
    model.eval()
    for split in ["train", "val"]:
        losses = torch.zeros(eval_iters)
        for k in range(eval_iters):
            X, Y = get_batch(split)
            _, loss = model(X, Y)
            losses[k] = loss.item()
        out[split] = losses.mean()
    model.train()
    return out
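
# Note: averaging the loss over eval_iters random batches gives a much lower-variance
# estimate than a single batch would. The eval()/train() toggling is a no-op for this model
# (no dropout or batch norm), but it keeps the evaluation pattern correct if such layers are
# added later.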

if __name__ == "__main__":
    train_flag = "--train" in sys.argv
    weights_path = "weights/gpt.pt"
    os.makedirs(os.path.dirname(weights_path), exist_ok=True)

    model = Model()
    m = model.to(device)
    # print the number of parameters in the model
    print(sum(p.numel() for p in m.parameters()) / 1e6, "M parameters")

    # Load weights if they exist and the train flag is not set
    if os.path.exists(weights_path) and not train_flag:
        print(f"Loading weights from {weights_path}")
        m.load_state_dict(torch.load(weights_path, map_location=device))
    else:
        print("Training from scratch")
        # create a PyTorch optimizer
        optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
        start = time.time()
        for iter in range(max_iters):
            # every once in a while, evaluate the loss on the train and val sets
            if iter % eval_interval == 0 or iter == max_iters - 1:
                losses = estimate_loss()
                print(
                    f"step {iter}: train loss {losses['train']:.4f}, "
                    f"val loss {losses['val']:.4f}, time {time.time() - start:.2f} seconds"
                )
                # Generate a sample
                sample = generate(m, max_new_tokens=240)
                print(f"Sample:\n{sample}\n")
            # sample a batch of data
            xb, yb = get_batch("train")
            # evaluate the loss
            logits, loss = model(xb, yb)
            optimizer.zero_grad(set_to_none=True)
            loss.backward()
            optimizer.step()
        # Save the model weights
        print(f"Total training time: {time.time() - start:.2f} seconds")
        print(f"Saving weights to {weights_path}")
        torch.save(m.state_dict(), weights_path)

    # generate from the model
    start = time.time()
    output = generate(m, max_new_tokens=2000, temp=0.8)
    print(f"Total generation time: {time.time() - start:.2f} seconds")
    print(f"\nOutput:\n{output}")