# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# DeiT: https://github.com/facebookresearch/deit
# BEiT: https://github.com/microsoft/unilm/tree/master/beit
# --------------------------------------------------------
import builtins
import datetime
import glob
import os
import subprocess
import time
from collections import defaultdict, deque
from pathlib import Path
from types import TracebackType
from typing import Any, Optional

import torch
import torch.nn as nn
import torch.distributed as dist
from torch import inf
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
from torch.distributed.fsdp import (
    FullyShardedDataParallel as FSDP,
    StateDictType,
    FullStateDictConfig,
)
from torch.distributed._shard.api import load_with_process_group

from fairscale.nn.model_parallel import initialize as fs_init
class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """

    def __init__(self, window_size=20, fmt=None):
        if fmt is None:
            fmt = "{median:.4f} ({global_avg:.4f})"
        self.deque = deque(maxlen=window_size)
        self.total = 0.0
        self.count = 0
        self.fmt = fmt

    def update(self, value, n=1):
        self.deque.append(value)
        self.count += n
        self.total += value * n

    def synchronize_between_processes(self):
        """
        Warning: does not synchronize the deque!
        """
        if not is_dist_avail_and_initialized():
            return
        t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
        dist.barrier()
        dist.all_reduce(t)
        t = t.tolist()
        self.count = int(t[0])
        self.total = t[1]

    @property
    def median(self):
        d = torch.tensor(list(self.deque))
        return d.median().item()

    @property
    def avg(self):
        d = torch.tensor(list(self.deque), dtype=torch.float32)
        return d.mean().item()

    @property
    def global_avg(self):
        return self.total / self.count

    @property
    def max(self):
        return max(self.deque)

    @property
    def value(self):
        return self.deque[-1]

    def __str__(self):
        return self.fmt.format(
            median=self.median,
            avg=self.avg,
            global_avg=self.global_avg,
            max=self.max,
            value=self.value)
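
# Illustrative use of SmoothedValue (a sketch, not executed as part of this module;
# `losses` is a placeholder iterable of floats):
#
#     loss_meter = SmoothedValue(window_size=20)
#     for loss in losses:
#         loss_meter.update(loss)
#     print(str(loss_meter))        # "median (global_avg)" with the default fmt
#     print(loss_meter.global_avg)  # average over all updates, not just the window
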
class MetricLogger(object):
    def __init__(self, delimiter="\t"):
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        for k, v in kwargs.items():
            if v is None:
                continue
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)

    def __getattr__(self, attr):
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(
            type(self).__name__, attr))

    def __str__(self):
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append(
                "{}: {}".format(name, str(meter))
            )
        return self.delimiter.join(loss_str)

    def synchronize_between_processes(self):
        for meter in self.meters.values():
            meter.synchronize_between_processes()

    def add_meter(self, name, meter):
        self.meters[name] = meter

    def log_every(self, iterable, print_freq, header=None, start_iter=0):
        i = start_iter
        if not header:
            header = ''
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt='{avg:.4f}')
        data_time = SmoothedValue(fmt='{avg:.4f}')
        log_msg = [
            header,
            '[{0}/{1}]',
            '{meters}',
            'time: {time}',
            'data: {data}'
        ]
        if torch.cuda.is_available():
            log_msg.append('max mem: {memory:.0f}')
        log_msg = self.delimiter.join(log_msg)
        MB = 1024.0 * 1024.0
        for obj in iterable:
            data_time.update(time.time() - end)
            yield obj
            iter_time.update(time.time() - end)
            if i % print_freq == 0:
                try:
                    total_len = len(iterable)
                except TypeError:
                    total_len = "unknown"
                if torch.cuda.is_available():
                    print(log_msg.format(
                        i, total_len,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time),
                        memory=torch.cuda.max_memory_allocated() / MB))
                else:
                    print(log_msg.format(
                        i, total_len,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time)))
            i += 1
            end = time.time()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print('{} Total time: {} ({:.4f} s / it)'.format(
            header, total_time_str, total_time / len(iterable)))
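
# Illustrative training-loop usage of MetricLogger (a sketch; `data_loader`,
# `compute_loss`, and `optimizer` are placeholders, not part of this module):
#
#     metric_logger = MetricLogger(delimiter="  ")
#     for batch in metric_logger.log_every(data_loader, print_freq=10, header='Epoch: [0]'):
#         loss = compute_loss(batch)
#         metric_logger.update(loss=loss.item(), lr=optimizer.param_groups[0]["lr"])
#     metric_logger.synchronize_between_processes()
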
def setup_for_distributed(is_master):
    """
    This function disables printing when not in the master process.
    """
    builtin_print = builtins.print

    def print(*args, **kwargs):
        force = kwargs.pop('force', False)
        # force = force or (get_world_size() > 8)
        if is_master or force:
            now = datetime.datetime.now().time()
            builtin_print('[{}] '.format(now), end='')  # print with time stamp
            builtin_print(*args, **kwargs)

    builtins.print = print
def is_dist_avail_and_initialized():
    if not dist.is_available():
        return False
    if not dist.is_initialized():
        return False
    return True


def get_world_size():
    if not is_dist_avail_and_initialized():
        return 1
    return dist.get_world_size()


def get_rank():
    if not is_dist_avail_and_initialized():
        return 0
    return dist.get_rank()


def is_main_process():
    return get_rank() == 0


def save_on_master(*args, **kwargs):
    if is_main_process():
        torch.save(*args, **kwargs)
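
# init_distributed_mode supports three launchers: an OpenMPI-style launch
# (args.dist_on_itp, using OMPI_COMM_WORLD_* variables), a torchrun-style launch
# (RANK / WORLD_SIZE / LOCAL_RANK already set in the environment), and a raw SLURM
# launch (SLURM_PROCID, with MASTER_ADDR resolved from the node list via `sinfo`).
# Anything else falls back to single-process, non-distributed execution.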
def init_distributed_mode(args):
    if args.dist_on_itp:
        args.rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
        args.world_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
        args.gpu = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
        args.dist_url = "tcp://%s:%s" % (os.environ['MASTER_ADDR'], os.environ['MASTER_PORT'])
        os.environ['LOCAL_RANK'] = str(args.gpu)
        os.environ['RANK'] = str(args.rank)
        os.environ['WORLD_SIZE'] = str(args.world_size)
        # ["RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK"]
    elif 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.gpu = int(os.environ['LOCAL_RANK'])
    elif 'SLURM_PROCID' in os.environ:
        os.environ['MASTER_PORT'] = '8994'
        while 'MASTER_ADDR' not in os.environ or len(os.environ['MASTER_ADDR'].strip()) == 0:
            os.environ['MASTER_ADDR'] = subprocess.check_output(
                'sinfo -Nh -n %s | head -n 1 | awk \'{print $1}\'' % os.environ['SLURM_NODELIST'],
                shell=True,
            ).decode().strip()
            time.sleep(1)
        print(os.environ['MASTER_ADDR'])
        args.world_size = int(os.environ['SLURM_NPROCS'])
        args.rank = int(os.environ['SLURM_PROCID'])
        args.gpu = args.rank % torch.cuda.device_count()
        args.local_rank = args.gpu
        os.environ['LOCAL_RANK'] = str(args.gpu)
        os.environ['WORLD_SIZE'] = str(args.world_size)
        os.environ['RANK'] = str(args.rank)
    else:
        print('Not using distributed mode')
        setup_for_distributed(is_master=True)  # hack
        args.distributed = False
        return

    args.distributed = True

    torch.cuda.set_device(args.gpu)
    args.dist_backend = 'nccl'
    print('| distributed init (rank {}): {}, gpu {}'.format(
        args.rank, args.dist_url, args.gpu), flush=True)
    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                         world_size=args.world_size, rank=args.rank)
    torch.distributed.barrier()
    setup_for_distributed(args.rank == 0)
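
# init_distributed_mode1 is a variant of init_distributed_mode: under SLURM it only
# derives the rank and local GPU index and relies on an externally provided
# args.dist_url / MASTER_ADDR / MASTER_PORT instead of resolving the master node itself.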
def init_distributed_mode1(args):
    if args.dist_on_itp:
        args.rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
        args.world_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
        args.gpu = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
        args.dist_url = "tcp://%s:%s" % (os.environ['MASTER_ADDR'], os.environ['MASTER_PORT'])
        os.environ['LOCAL_RANK'] = str(args.gpu)
        os.environ['RANK'] = str(args.rank)
        os.environ['WORLD_SIZE'] = str(args.world_size)
        # ["RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK"]
    elif 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.gpu = int(os.environ['LOCAL_RANK'])
    elif 'SLURM_PROCID' in os.environ:
        args.rank = int(os.environ['SLURM_PROCID'])
        args.gpu = args.rank % torch.cuda.device_count()
    else:
        print('Not using distributed mode')
        setup_for_distributed(is_master=True)  # hack
        args.distributed = False
        return

    args.distributed = True

    torch.cuda.set_device(args.gpu)
    args.dist_backend = 'nccl'
    print('| distributed init (rank {}): {}, gpu {}'.format(
        args.rank, args.dist_url, args.gpu), flush=True)
    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                         world_size=args.world_size, rank=args.rank)
    torch.distributed.barrier()
    setup_for_distributed(args.rank == 0)
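
# NativeScalerWithGradNormCount wraps ShardedGradScaler for FSDP training: it scales the
# loss for fp16 (the scaler is disabled for other precisions), clips gradients with
# FSDP's model.clip_grad_norm_, and, when update_grad is False (gradient accumulation),
# runs the backward pass under model.no_sync() to skip gradient synchronization.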
class NativeScalerWithGradNormCount:
    state_dict_key = "amp_scaler"

    def __init__(self, args):
        self._scaler = ShardedGradScaler(enabled=args.precision in ["fp16"])

    def __call__(self, loss, optimizer, model, clip_grad=None, parameters=None, create_graph=False, update_grad=True):
        if update_grad:
            self._scaler.scale(loss).backward(create_graph=create_graph)
            if clip_grad is not None:
                assert parameters is not None
                self._scaler.unscale_(optimizer)  # unscale the gradients of optimizer's assigned params in-place
                # norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad)
                norm = model.clip_grad_norm_(clip_grad)
            else:
                raise NotImplementedError("please set clip_grad to a very large value if you do not want to clip.")
                self._scaler.unscale_(optimizer)
                norm = get_grad_norm_(parameters)
            self._scaler.step(optimizer)
            self._scaler.update()
        else:
            with model.no_sync():
                self._scaler.scale(loss).backward(create_graph=create_graph)
            norm = None
        return norm

    def state_dict(self):
        return self._scaler.state_dict()

    def load_state_dict(self, state_dict):
        self._scaler.load_state_dict(state_dict)
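
# get_grad_norm_ reports the total gradient norm without modifying the gradients; it
# mirrors the norm computation in torch.nn.utils.clip_grad_norm_ and is kept for the
# non-FSDP clipping path referenced (but unreached) above.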
def get_grad_norm_(parameters, norm_type: float = 2.0) -> torch.Tensor:
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    parameters = [p for p in parameters if p.grad is not None]
    norm_type = float(norm_type)
    if len(parameters) == 0:
        return torch.tensor(0.)
    device = parameters[0].grad.device
    if norm_type == inf:
        total_norm = max(p.grad.detach().abs().max().to(device) for p in parameters)
    else:
        total_norm = torch.norm(
            torch.stack([torch.norm(p.grad.detach(), norm_type).to(device) for p in parameters]),
            norm_type,
        )
    return total_norm
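
# save_model writes one sharded checkpoint file per rank
# (checkpoint.<rank>-of-<world_size>.pth) under epoch_<e>_iter_<i>/, optionally a
# consolidated full state dict per model-parallel rank when args.save_consolidated is
# set, and then prunes all but the two most recent checkpoint directories unless
# args.keep_all is set.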
def save_model(output_dir, args, epoch, iteration, model, optimizer, loss_scaler, dataset_state):
    save_dir = os.path.join(output_dir, f"epoch_{epoch}_iter_{iteration:09d}")
    os.makedirs(save_dir, exist_ok=True)

    with FSDP.state_dict_type(model, StateDictType.SHARDED_STATE_DICT):
        to_save = {
            "model": model.state_dict(),
            "optimizer": optimizer.state_dict(),
            "iter": iteration,
            "epoch": epoch,
            "scaler": loss_scaler.state_dict(),
            "args": args,
            "dataset_state": dataset_state,
        }
        save_path = os.path.join(
            save_dir,
            f"checkpoint.{dist.get_rank():05d}-of-{dist.get_world_size():05d}.pth",
        )
        torch.save(to_save, save_path)

    if args.save_consolidated:
        mp_rank = fs_init.get_model_parallel_rank()
        mp_world_size = fs_init.get_model_parallel_world_size()
        consolidated_model_save_path = os.path.join(
            save_dir,
            f"consolidated.{mp_rank:02d}-of-{mp_world_size:02d}.pth",
        )
        with FSDP.state_dict_type(
            model,
            StateDictType.FULL_STATE_DICT,
            FullStateDictConfig(rank0_only=True, offload_to_cpu=True),
        ):
            save_dtype = {
                "fp16": torch.float16,
                "bf16": torch.bfloat16,
                "tf32": torch.float32,
            }[args.precision]
            consolidated_model_state_dict = {
                k: v.to(save_dtype) for k, v in model.state_dict().items()
            }
        if fs_init.get_data_parallel_rank() == 0:
            torch.save(consolidated_model_state_dict, consolidated_model_save_path)

    # remove previous ckpts
    ckpts = glob.glob(os.path.join(output_dir, "iter_*")) + glob.glob(os.path.join(output_dir, "epoch_*"))
    ckpts.sort()
    if len(ckpts) > 2 and not args.keep_all:
        for ckpt in ckpts[:-2]:
            print('del', ckpt)
            os.system(f'rm {ckpt} -rf')
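
# load_model resumes from the newest epoch_*/iter_* directory when args.auto_resume is
# set (otherwise from args.resume), loading the rank-local shard written by save_model
# and returning (start_epoch, start_iter) for the training loop.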
def load_model(args, model, optimizer, loss_scaler):
    start_iter = 0
    start_epoch = 0
    if args.auto_resume:
        ckpt_dirs = glob.glob(os.path.join(args.output_dir, "iter_*")) + glob.glob(os.path.join(args.output_dir, "epoch_*"))
        ckpt_dirs.sort()
        if len(ckpt_dirs) > 0:
            args.resume = ckpt_dirs[-1]
    if args.resume:
        print("Resume checkpoint %s" % args.resume)
        local_checkpoint_path = os.path.join(
            args.resume,
            f"checkpoint.{dist.get_rank():05d}-of-{dist.get_world_size():05d}.pth",
        )
        with load_with_process_group(fs_init.get_data_parallel_group()):
            checkpoint = torch.load(local_checkpoint_path, map_location='cpu')
        with FSDP.state_dict_type(model, StateDictType.SHARDED_STATE_DICT):
            model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        loss_scaler.load_state_dict(checkpoint['scaler'])
        start_iter = int(checkpoint['iter']) + 1
        if 'epoch' in checkpoint:
            start_epoch = int(checkpoint['epoch'])
    return start_epoch, start_iter
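
# Illustrative use of all_reduce_mean (a sketch): average a scalar loss across
# data-parallel ranks for logging, e.g.
#
#     loss_value_reduce = all_reduce_mean(loss.item())
#     metric_logger.update(loss=loss_value_reduce)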
def all_reduce_mean(x):
    world_size = get_world_size()
    if world_size > 1:
        if isinstance(x, torch.Tensor):
            x_reduce = x.clone().cuda()
        else:
            x_reduce = torch.tensor(x).cuda()
        dist.all_reduce(x_reduce)
        x_reduce /= world_size
        return x_reduce.item()
    else:
        return x
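
# Illustrative use of add_weight_decay (a sketch): build optimizer parameter groups so
# that biases and normalization weights are excluded from weight decay, e.g.
#
#     param_groups = add_weight_decay(model, weight_decay=0.05)
#     optimizer = torch.optim.AdamW(param_groups, lr=1e-4)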
def add_weight_decay(model, weight_decay=1e-5, skip_list=()):
    decay = []
    no_decay = []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue  # frozen weights
        # if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list:
        if name.endswith(".bias") or name.endswith("norm.weight"):
            no_decay.append(param)
        else:
            decay.append(param)
    return [
        {'params': no_decay, 'weight_decay': 0.},
        {'params': decay, 'weight_decay': weight_decay}]
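
# default_tensor_type is a re-entrant context manager: it pushes a (dtype, device) pair
# onto a class-level stack on entry and restores the previous default tensor type,
# device, and dtype on exit, so nested `with` blocks compose correctly.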
class default_tensor_type:
    _tensor_type_stack = [(torch.float, "cpu")]

    def __init__(
        self,
        dtype: Optional[torch.dtype] = None,
        device: Optional[str] = None,
    ) -> None:
        # Only limited combinations are supported.
        assert device is None or device in ["cpu", "cuda"]
        assert dtype is None or dtype in [torch.float, torch.bfloat16, torch.half]
        self.dtype, self.device = dtype, device

    def __enter__(self) -> None:
        dtype, device = self.dtype, self.device
        if dtype is None:
            dtype = default_tensor_type._tensor_type_stack[-1][0]
        if device is None:
            device = default_tensor_type._tensor_type_stack[-1][1]
        default_tensor_type._tensor_type_stack.append((dtype, device))

        # We use all 3 calls since the new apis (set_default_device, set_default_dtype)
        # seem to be ineffective sometimes (e.g., set_default_device is ineffective to
        # torch.Tensor calls).
        torch.set_default_tensor_type(default_tensor_type.get_tensor_type(dtype, device))
        torch.set_default_device(device)
        torch.set_default_dtype(dtype)

    def __exit__(
        self,
        exc_type: Optional[type[BaseException]],
        exc_val: Optional[BaseException],
        exc_tb: Optional[TracebackType],
    ) -> None:
        default_tensor_type._tensor_type_stack.pop()
        dtype, device = default_tensor_type._tensor_type_stack[-1]

        torch.set_default_tensor_type(default_tensor_type.get_tensor_type(dtype, device))
        torch.set_default_device(device)
        torch.set_default_dtype(dtype)

    @staticmethod
    def get_tensor_type(dtype: torch.dtype, device: str) -> Any:
        return {
            (torch.float, "cpu"): torch.FloatTensor,
            (torch.bfloat16, "cpu"): torch.BFloat16Tensor,
            (torch.half, "cpu"): torch.HalfTensor,
            (torch.float, "cuda"): torch.cuda.FloatTensor,
            (torch.bfloat16, "cuda"): torch.cuda.BFloat16Tensor,
            (torch.half, "cuda"): torch.cuda.HalfTensor,
        }[(dtype, device)]
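
# Illustrative use of default_tensor_type (a sketch; `MyModel` is a placeholder):
#
#     with default_tensor_type(dtype=torch.bfloat16, device="cuda"):
#         model = MyModel()  # parameters are created as bf16 CUDA tensors
#     # outside the block, the previous defaults (float32 on cpu) are restored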