"""
This training script can be run both on a single GPU in debug mode,
and also in a larger training run with distributed data parallel (DDP).

To run a small debug run on a single GPU, example:
$ python train.py --compile=False --eval_iters=10 --batch_size=8

To run with DDP on 4 GPUs on 1 node, example:
$ torchrun --standalone --nproc_per_node=4 train.py

To run with DDP on 8 GPUs per node across 2 nodes, example:
- Run on the first (master) node with example IP 123.456.123.456:
$ torchrun --nproc_per_node=8 --nnodes=2 --node_rank=0 --master_addr=123.456.123.456 --master_port=1234 train.py
- Run on the worker node:
$ torchrun --nproc_per_node=8 --nnodes=2 --node_rank=1 --master_addr=123.456.123.456 --master_port=1234 train.py
(If your cluster does not have an InfiniBand interconnect, prepend NCCL_IB_DISABLE=1.)
"""

import math
import os
import time
from contextlib import nullcontext
from datetime import datetime
from functools import partial
import inspect

import torch
from torch.distributed import destroy_process_group, init_process_group  # imported for DDP runs; not used in the single-GPU path below
from torch.nn.parallel import DistributedDataParallel as DDP  # imported for DDP runs; not used in the single-GPU path below

from tinystories import Task

from model import MambaLMHeadModel

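# Default configuration values: I/O, wandb logging, data, model size, optimizer,
# LR schedule, and system settings. The --key=value overrides shown in the module
# docstring assume an external override step that is not part of this file; a
# hedged sketch of one is included after the `config` dict below.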
# I/O and evaluation
out_dir = "out/768-8"
eval_interval = 2000
log_interval = 1
eval_iters = 100
eval_only = False  # if True, the script exits right after the first eval
always_save_checkpoint = True  # if True, always save a checkpoint after each eval
init_from = "resume"  # "scratch" or "resume"

# wandb logging
wandb_log = True
wandb_project = "tiny-mambas"
wandb_run_name = "run" + datetime.now().strftime("%Y_%m_%d_%H_%M_%S")

# data
batch_size = 128
max_seq_len = 256
vocab_size = 4096
vocab_source = "custom"

# model
d_model = 768
n_layer = 8

# optimizer (AdamW)
gradient_accumulation_steps = 4
learning_rate = 5e-4
max_iters = 100000
weight_decay = 1e-1
beta1 = 0.9
beta2 = 0.95
grad_clip = 1.0  # clip gradients at this value, or disable clipping if == 0.0

# learning rate schedule
decay_lr = True
warmup_iters = 1000

# system
device = "cuda"
dtype = "float16"  # "float32", "bfloat16", or "float16" (float16 enables the GradScaler)
compile = False  # use torch.compile


class mambaConfig:
    # The class object itself is passed to MambaLMHeadModel below, which is expected
    # to read these class attributes as its configuration.
    d_model: int = d_model
    n_layer: int = n_layer
    vocab_size: int = vocab_size
    ssm_cfg: dict = None
    rms_norm: bool = True
    residual_in_fp32: bool = True
    fused_add_norm: bool = True
    pad_vocab_size_multiple: int = 8


# snapshot of all simple-typed globals, used for checkpointing and wandb logging
config_keys = [
    k
    for k, v in globals().items()
    if not k.startswith("_") and isinstance(v, (int, float, bool, str))
]
config = {k: globals()[k] for k in config_keys}
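
# The module docstring shows command-line overrides such as --batch_size=8, but no
# override mechanism appears in this file (nanoGPT-style scripts typically exec a
# separate configurator.py around here). The block below is only a minimal sketch
# under that assumption; note that model-size keys (d_model, n_layer, vocab_size)
# are already baked into mambaConfig above, so override those in the source instead.
import sys

for _arg in sys.argv[1:]:
    if _arg.startswith("--") and "=" in _arg:
        _key, _val = _arg[2:].split("=", 1)
        if _key in config:
            _default = globals()[_key]
            # parse the override using the default's type; booleans need explicit handling
            _parsed = _val.lower() in ("1", "true") if isinstance(_default, bool) else type(_default)(_val)
            globals()[_key] = _parsed
            config[_key] = _parsed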

# derived / fixed settings
lr_decay_iters = max_iters  # decay the learning rate over the whole run
min_lr = 5e-5  # floor reached at the end of the cosine decay

os.makedirs(out_dir, exist_ok=True)  # make sure the checkpoint directory exists
torch.cuda.set_device(0)

torch.manual_seed(1337)
torch.backends.cuda.matmul.allow_tf32 = True  # allow tf32 on matmul
torch.backends.cudnn.allow_tf32 = True  # allow tf32 on cudnn
device_type = "cuda" if "cuda" in device else "cpu"  # for torch.amp.autocast below
ptdtype = {"float32": torch.float32, "bfloat16": torch.bfloat16, "float16": torch.float16}[dtype]
ctx = (
    nullcontext()
    if device_type == "cpu"
    else torch.amp.autocast(device_type=device_type, dtype=ptdtype)
)
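
# Mixed-precision note: under float16 autocast the backward pass needs loss scaling
# to avoid gradient underflow, which is what the GradScaler created further below
# provides; with bfloat16 or float32 that scaler is constructed disabled and is a no-op.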

iter_batches = partial(
    Task.iter_batches,
    batch_size=batch_size,
    max_seq_len=max_seq_len,
    vocab_size=vocab_size,
    vocab_source=vocab_source,
    device=device,
    num_workers=0,
)
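
# Task.iter_batches (pre-bound above via partial) is expected to yield (X, Y) pairs of
# token-id tensors already placed on `device`; the rest of the script only passes
# split="train" or split="val" when creating an iterator.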

iter_num = 0  # number of iterations run so far (restored from the checkpoint on resume)
best_val_loss = 1e9  # best validation loss seen so far

# model arguments saved into checkpoints (and restored from them on resume)
model_args = dict(
    d_model=d_model,
    n_layer=n_layer,
    vocab_size=vocab_size,
    max_seq_len=max_seq_len,
)
# tokens processed per optimizer step; the constant 1 stands in for the DDP world size
# in this single-process script
tokens_per_iter = gradient_accumulation_steps * 1 * batch_size * max_seq_len
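# With the defaults that is 4 * 1 * 128 * 256 = 131,072 tokens per iteration, i.e.
# roughly 13.1B tokens over the full 100,000-iteration run.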

# model init
if init_from == "scratch":
    print("Initializing a new model from scratch")
    model = MambaLMHeadModel(mambaConfig)
    model.last_loss = None
elif init_from == "resume":
    print(f"Resuming training from {out_dir}")
    ckpt_path = os.path.join(out_dir, "ckpt.pt")
    checkpoint = torch.load(ckpt_path, map_location=device)
    checkpoint_model_args = checkpoint["model_args"]

    # force these arguments to match the checkpoint, otherwise we cannot resume
    for k in ["d_model", "n_layer", "vocab_size", "max_seq_len"]:
        model_args[k] = checkpoint_model_args[k]
    # keep mambaConfig (which the model is actually built from) consistent with the checkpoint
    for k in ["d_model", "n_layer", "vocab_size"]:
        setattr(mambaConfig, k, checkpoint_model_args[k])

    model = MambaLMHeadModel(mambaConfig)
    model.last_loss = None
    state_dict = checkpoint["model"]

    # torch.compile prefixes parameter names with "_orig_mod."; strip it so the state
    # dict loads into an uncompiled model
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            state_dict[k[len(unwanted_prefix) :]] = state_dict.pop(k)
    model.load_state_dict(state_dict)
    iter_num = checkpoint["iter_num"]
    best_val_loss = checkpoint["best_val_loss"]
model.to(device)

# GradScaler for float16 training; if dtype is not float16 this is constructed disabled and is a no-op
scaler = torch.cuda.amp.GradScaler(enabled=(dtype == "float16"))

# optimizer: AdamW with weight decay applied only to >= 2D tensors
# (i.e. weight matrices and embeddings decay, biases and norm scales do not)
param_dict = {pn: p for pn, p in model.named_parameters()}
param_dict = {pn: p for pn, p in param_dict.items() if p.requires_grad}

betas = (beta1, beta2)
decay_params = [p for n, p in param_dict.items() if p.dim() >= 2]
nodecay_params = [p for n, p in param_dict.items() if p.dim() < 2]
optim_groups = [
    {"params": decay_params, "weight_decay": weight_decay},
    {"params": nodecay_params, "weight_decay": 0.0},
]
num_decay_params = sum(p.numel() for p in decay_params)
num_nodecay_params = sum(p.numel() for p in nodecay_params)
print(f"num decayed parameter tensors: {len(decay_params)}, with {num_decay_params:,} parameters")
print(f"num non-decayed parameter tensors: {len(nodecay_params)}, with {num_nodecay_params:,} parameters")

# use the fused AdamW implementation when it is available and we are on CUDA
fused_available = "fused" in inspect.signature(torch.optim.AdamW).parameters
use_fused = fused_available and device_type == "cuda"
extra_args = dict(fused=True) if use_fused else dict()
optimizer = torch.optim.AdamW(optim_groups, lr=learning_rate, betas=betas, **extra_args)
print(f"using fused AdamW: {use_fused}")

# restore the optimizer state when resuming
if init_from == "resume" and "optimizer" in checkpoint:
    optimizer.load_state_dict(checkpoint["optimizer"])
checkpoint = None  # free up memory

if compile:
    print("compiling the model... (takes a ~minute)")
    unoptimized_model = model  # keep a handle to the original module
    model = torch.compile(model)


@torch.no_grad()
def estimate_loss():
    # estimate the loss over eval_iters batches from each split
    out = {}
    model.eval()
    for split in ["train", "val"]:
        batch_iter = iter_batches(split=split)
        losses = torch.zeros(eval_iters)
        for k in range(eval_iters):
            X, Y = next(batch_iter)
            with ctx:
                logits = model(X, Y)
                loss = raw_model.last_loss
            losses[k] = loss.item()
        out[split] = losses.mean()
    model.train()
    return out
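
# NOTE: estimate_loss reads the loss from raw_model.last_loss rather than from the
# return value of model(X, Y); this relies on the model's forward pass stashing the
# loss on that attribute, and on raw_model being bound (further below) before the
# first call inside the training loop.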


def get_lr(it):
    # learning rate schedule: linear warmup, then cosine decay down to min_lr
    # 1) linear warmup for warmup_iters steps
    if it < warmup_iters:
        return learning_rate * it / warmup_iters
    # 2) past lr_decay_iters, hold at the minimum learning rate
    if it > lr_decay_iters:
        return min_lr
    # 3) in between, cosine-decay from learning_rate down to min_lr
    decay_ratio = (it - warmup_iters) / (lr_decay_iters - warmup_iters)
    assert 0 <= decay_ratio <= 1
    coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio))  # ranges from 1 down to 0
    return min_lr + coeff * (learning_rate - min_lr)
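
# With the defaults: lr ramps linearly from 0 to 5e-4 over the first 1,000 iters, then
# follows min_lr + 0.5 * (1 + cos(pi * (it - 1000) / (100000 - 1000))) * (5e-4 - 5e-5),
# reaching 5e-5 at iter 100,000. For example, halfway through the decay (it = 50,500)
# coeff = 0.5 and lr = 5e-5 + 0.5 * 4.5e-4 = 2.75e-4.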


if wandb_log:
    import wandb

    wandb.init(project=wandb_project, name=wandb_run_name, config=config)

# training loop
train_batch_iter = iter_batches(split="train")
X, Y = next(train_batch_iter)  # fetch the very first batch
t0 = time.time()
local_iter_num = 0  # number of iterations in the lifetime of this process
raw_model = model  # handle used to read .last_loss and to save state_dicts
running_mfu = -1.0  # not used below
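
# Each outer iteration: set the LR for this step, optionally evaluate and checkpoint,
# run gradient_accumulation_steps forward/backward micro-steps under autocast, clip
# gradients, and take one optimizer step through the GradScaler.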
while True:
    # set the learning rate for this iteration
    lr = get_lr(iter_num) if decay_lr else learning_rate
    for param_group in optimizer.param_groups:
        param_group["lr"] = lr

    # evaluate the loss on train/val and log to wandb
    if iter_num % eval_interval == 0:
        losses = estimate_loss()
        print(f"step {iter_num}: train loss {losses['train']:.4f}, val loss {losses['val']:.4f}")
        if wandb_log:
            try:
                wandb.log(
                    {
                        "iter": iter_num,
                        "tokens": iter_num * tokens_per_iter,
                        "loss/train": losses["train"],
                        "loss/val": losses["val"],
                        "lr": lr,
                    },
                    step=iter_num,
                )
            except Exception as e:
                print(f"logging to wandb failed: {e}")
        # save a checkpoint if the validation loss improved (or always, if configured)
        if losses["val"] < best_val_loss or always_save_checkpoint:
            best_val_loss = losses["val"]
            if iter_num > 0:
                checkpoint = {
                    "model": raw_model.state_dict(),
                    "optimizer": optimizer.state_dict(),
                    "model_args": model_args,
                    "iter_num": iter_num,
                    "best_val_loss": best_val_loss,
                    "config": config,
                }
                print(f"saving checkpoint to {out_dir}")
                torch.save(checkpoint, os.path.join(out_dir, "ckpt.pt"))
    if iter_num == 0 and eval_only:
        break

    # forward/backward with gradient accumulation
    for micro_step in range(gradient_accumulation_steps):
        with ctx:
            logits = model(X, Y)
            loss = raw_model.last_loss
            # scale the loss so gradients accumulate to an average over the micro-steps
            loss = loss / gradient_accumulation_steps
        # immediately fetch the next batch while the GPU is still busy with this one
        X, Y = next(train_batch_iter)
        # backward pass, with gradient scaling if training in fp16
        scaler.scale(loss).backward()

    # clip gradients (after unscaling them out of the fp16 loss scale)
    if grad_clip != 0.0:
        scaler.unscale_(optimizer)
        torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip)
    # step the optimizer and update the scaler if training in fp16
    scaler.step(optimizer)
    scaler.update()
    # flush the gradients as soon as we can; no need to keep this memory around
    optimizer.zero_grad(set_to_none=True)
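
    # NOTE: the ordering above matters with AMP: unscale_ must come before clipping so the
    # threshold applies to true gradient magnitudes, scaler.step skips the optimizer step
    # if it finds inf/NaN gradients, and scaler.update then adjusts the loss scale.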

    # timing and logging
    t1 = time.time()
    dt = t1 - t0
    t0 = t1
    if iter_num % log_interval == 0:
        # scale the last micro-batch loss back up to undo the division above
        # (note: loss.item() is a CPU-GPU sync point)
        lossf = loss.item() * gradient_accumulation_steps
        print(
            f"{iter_num} | loss {lossf:.4f} | lr {lr:e} | {dt*1000:.2f}ms |"
        )
    iter_num += 1
    local_iter_num += 1

    # termination condition
    if iter_num > max_iters:
        break