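"""Training utilities: loss estimation over the train/val splits and a training
loop with periodic evaluation and best-model checkpointing."""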

import logging
import os

import torch

from src.utils.helpers import get_batch
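
# `get_batch` is imported from src.utils.helpers, which is not shown here.
# Judging from the call sites below, it returns an (inputs, targets) batch pair
# for the requested split, presumably tensors of shape (batch_size, block_size).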


@torch.no_grad()
def estimate_loss(model, eval_iters, block_size, batch_size, device):
    """Return the mean loss over `eval_iters` batches for the train and val splits."""
    out = {}
    model.eval()
    for split in ['train', 'val']:
        losses = torch.zeros(eval_iters)
        for k in range(eval_iters):
            xb, yb = get_batch(split, block_size, batch_size)
            xb, yb = xb.to(device), yb.to(device)
            logits, loss = model(xb, yb)
            losses[k] = loss.item()
        out[split] = losses.mean().item()
    model.train()  # restore training mode before returning
    return out


def train(
    model,
    optimizer,
    max_iters,
    eval_interval,
    eval_iters,
    block_size,
    batch_size,
    device,
    checkpoint_path="checkpoints/model.pth",
):
    logger = logging.getLogger(__name__)
    # Create the checkpoint directory up front so torch.save does not fail.
    os.makedirs(os.path.dirname(checkpoint_path) or ".", exist_ok=True)
    best_val_loss = float('inf')

    for step in range(max_iters):
        # Periodic evaluation
        if step % eval_interval == 0:
            losses = estimate_loss(model, eval_iters, block_size, batch_size, device)
            logger.info(
                f"Step {step}: train loss {losses['train']:.4f}, val loss {losses['val']:.4f}"
            )
            # Save a checkpoint whenever the validation loss improves
            if losses['val'] < best_val_loss:
                best_val_loss = losses['val']
                logger.info(f"Saving model with val loss: {best_val_loss:.4f}")
                torch.save(model, checkpoint_path)

        # Training step
        xb, yb = get_batch('train', block_size, batch_size)
        xb, yb = xb.to(device), yb.to(device)

        # Forward pass
        logits, loss = model(xb, yb)

        # Backward pass
        optimizer.zero_grad(set_to_none=True)
        loss.backward()
        optimizer.step()

    # Save final model. Note: this overwrites the best-val-loss checkpoint saved
    # above, since both writes use the same path, and torch.save(model, ...)
    # pickles the entire module; saving model.state_dict() is the more portable
    # convention.
    torch.save(model, checkpoint_path)
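

# Example usage (an illustrative sketch only; the model class and hyperparameter
# values below are assumptions, not part of this file):
#
#     logging.basicConfig(level=logging.INFO)
#     device = "cuda" if torch.cuda.is_available() else "cpu"
#     model = MyLanguageModel().to(device)   # forward(xb, yb) -> (logits, loss)
#     optimizer = torch.optim.AdamW(model.parameters(), lr=3e-4)
#     train(model, optimizer, max_iters=5000, eval_interval=500,
#           eval_iters=200, block_size=256, batch_size=64, device=device)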