# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import datetime
import functools
import os
import subprocess
from typing import Callable, Optional, Tuple, Union
import numpy as np
import torch
import torch.multiprocessing as mp
from torch import Tensor
from torch import distributed as torch_dist
from torch.distributed import ProcessGroup
from mmengine.device import is_mlu_available, is_npu_available
from collections.abc import Iterable, Mapping
_LOCAL_PROCESS_GROUP = None
def is_distributed() -> bool:
"""Return True if distributed environment has been initialized."""
return torch_dist.is_available() and torch_dist.is_initialized()
def get_local_group() -> Optional[ProcessGroup]:
"""Return local process group."""
if not is_distributed():
return None
if _LOCAL_PROCESS_GROUP is None:
raise RuntimeError('Local process group is not created, please use '
'`init_local_group` to setup local process group.')
return _LOCAL_PROCESS_GROUP
def get_default_group() -> Optional[ProcessGroup]:
"""Return default process group."""
return torch_dist.distributed_c10d._get_default_group()
def infer_launcher():
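    """Infer the launcher type from the environment variables.
    Returns:
        str: One of 'pytorch', 'slurm', 'mpi' or 'none'.
    """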
if 'WORLD_SIZE' in os.environ:
return 'pytorch'
elif 'SLURM_NTASKS' in os.environ:
return 'slurm'
elif 'OMPI_COMM_WORLD_LOCAL_RANK' in os.environ:
return 'mpi'
else:
return 'none'
def init_dist(launcher,
backend='nccl',
init_backend='torch',
**kwargs) -> None:
"""Initialize distributed environment.
Args:
        launcher (str): Way to launch multiple processes. Supported launchers
            are 'pytorch', 'mpi' and 'slurm'.
        backend (str): Communication backend. Supported backends are 'nccl',
            'gloo' and 'mpi'. Defaults to 'nccl'.
        init_backend (str): Which toolkit is used to initialize the
            distributed environment, one of 'torch', 'deepspeed' or
            'colossalai'. Defaults to 'torch'.
        **kwargs: keyword arguments are passed to ``init_process_group``.
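    Examples:
        >>> # Illustrative only: assumes the process was started by
        >>> # `torchrun` / `torch.distributed.launch` and that NCCL is
        >>> # available on this machine.
        >>> init_dist('pytorch', backend='nccl')  # doctest: +SKIP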
"""
timeout = kwargs.get('timeout', None)
if timeout is not None:
# If a timeout (in seconds) is specified, it must be converted
# to a timedelta object before forwarding the call to
# the respective backend, because they expect a timedelta object.
try:
kwargs['timeout'] = datetime.timedelta(seconds=timeout)
except TypeError as exception:
raise TypeError(
f'Timeout for distributed training must be provided as '
f"timeout in seconds, but we've received the type "
f'{type(timeout)}. Please specify the timeout like this: '
f"dist_cfg=dict(backend='nccl', timeout=1800)") from exception
if mp.get_start_method(allow_none=True) is None:
mp.set_start_method('spawn')
if launcher == 'pytorch':
_init_dist_pytorch(backend, init_backend=init_backend, **kwargs)
elif launcher == 'mpi':
_init_dist_mpi(backend, **kwargs)
elif launcher == 'slurm':
_init_dist_slurm(backend, init_backend=init_backend, **kwargs)
else:
raise ValueError(f'Invalid launcher type: {launcher}')
def _init_dist_pytorch(backend, init_backend='torch', **kwargs) -> None:
"""Initialize distributed environment with PyTorch launcher.
Args:
backend (str): Backend of torch.distributed. Supported backends are
'nccl', 'gloo' and 'mpi'. Defaults to 'nccl'.
**kwargs: keyword arguments are passed to ``init_process_group``.
"""
rank = int(os.environ['RANK'])
if is_mlu_available():
import torch_mlu # noqa: F401
local_rank = int(os.environ['LOCAL_RANK'])
torch.mlu.set_device(local_rank)
torch_dist.init_process_group(
backend='cncl',
rank=rank,
world_size=int(os.environ['WORLD_SIZE']),
**kwargs)
elif is_npu_available():
import torch_npu # noqa: F401
torch.npu.set_device(rank)
torch_dist.init_process_group(
backend='hccl',
rank=rank,
world_size=int(os.environ['WORLD_SIZE']),
**kwargs)
else:
# LOCAL_RANK is set by `torch.distributed.launch` since PyTorch 1.1
local_rank = int(os.environ['LOCAL_RANK'])
torch.cuda.set_device(local_rank)
if init_backend == 'torch':
torch_dist.init_process_group(backend=backend, **kwargs)
elif init_backend == 'deepspeed':
import deepspeed
deepspeed.init_distributed(dist_backend=backend, **kwargs)
elif init_backend == 'colossalai':
import colossalai
colossalai.launch_from_torch(backend=backend, **kwargs)
else:
            raise ValueError(
                'supported "init_backend" is "torch", "deepspeed" or '
                f'"colossalai", but got {init_backend}')
def _init_dist_mpi(backend, **kwargs) -> None:
"""Initialize distributed environment with MPI launcher.
Args:
backend (str): Backend of torch.distributed. Supported backends are
'nccl', 'gloo' and 'mpi'. Defaults to 'nccl'.
**kwargs: keyword arguments are passed to ``init_process_group``.
"""
if backend == 'smddp':
try:
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
'Please use an Amazon SageMaker DLC to access smdistributed: '
'https://github.com/aws/deep-learning-containers/blob/master'
'/available_images.md#sagemaker-framework-containers'
'-sm-support-only') from e
local_rank = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
torch.cuda.set_device(local_rank)
if 'MASTER_PORT' not in os.environ:
# 29500 is torch.distributed default port
os.environ['MASTER_PORT'] = '29500'
if 'MASTER_ADDR' not in os.environ:
raise KeyError('The environment variable MASTER_ADDR is not set')
os.environ['WORLD_SIZE'] = os.environ['OMPI_COMM_WORLD_SIZE']
os.environ['RANK'] = os.environ['OMPI_COMM_WORLD_RANK']
torch_dist.init_process_group(backend=backend, **kwargs)
def _init_dist_slurm(backend,
port=None,
init_backend='torch',
**kwargs) -> None:
"""Initialize slurm distributed training environment.
    If argument ``port`` is not specified, then the master port will be taken
    from the system environment variable ``MASTER_PORT``. If ``MASTER_PORT``
    is not set, the default port ``29500`` will be used.
    Args:
        backend (str): Backend of torch.distributed.
        port (int, optional): Master port. Defaults to None.
        init_backend (str): Which toolkit is used to initialize the
            distributed environment, one of 'torch', 'deepspeed' or
            'colossalai'. Defaults to 'torch'.
    """
proc_id = int(os.environ['SLURM_PROCID'])
ntasks = int(os.environ['SLURM_NTASKS'])
node_list = os.environ['SLURM_NODELIST']
# Not sure when this environment variable could be None, so use a fallback
local_rank_env = os.environ.get('SLURM_LOCALID', None)
if local_rank_env is not None:
local_rank = int(local_rank_env)
else:
num_gpus = torch.cuda.device_count()
local_rank = proc_id % num_gpus
torch.cuda.set_device(local_rank)
addr = subprocess.getoutput(
f'scontrol show hostname {node_list} | head -n1')
# specify master port
if port is not None:
os.environ['MASTER_PORT'] = str(port)
elif 'MASTER_PORT' in os.environ:
pass # use MASTER_PORT in the environment variable
else:
# 29500 is torch.distributed default port
os.environ['MASTER_PORT'] = '29500'
# use MASTER_ADDR in the environment variable if it already exists
if 'MASTER_ADDR' not in os.environ:
os.environ['MASTER_ADDR'] = addr
os.environ['WORLD_SIZE'] = str(ntasks)
os.environ['LOCAL_RANK'] = str(local_rank)
os.environ['RANK'] = str(proc_id)
if init_backend == 'torch':
torch_dist.init_process_group(backend=backend, **kwargs)
elif init_backend == 'deepspeed':
import deepspeed
deepspeed.init_distributed(dist_backend=backend, **kwargs)
elif init_backend == 'colossalai':
import colossalai
colossalai.launch_from_slurm(
backend=backend,
host=os.environ['MASTER_ADDR'],
port=os.environ['MASTER_PORT'],
**kwargs,
)
else:
        raise ValueError('supported "init_backend" is "torch", "deepspeed" '
                         f'or "colossalai", but got {init_backend}')
def init_local_group(node_rank: int, num_gpus_per_node: int):
"""Setup the local process group.
    Setup a process group which only includes processes that are on the same
    machine as the current process.
The code is modified from
https://github.com/facebookresearch/detectron2/blob/main/detectron2/engine/launch.py
Args:
        node_rank (int): Rank of the machine (node) used for training.
num_gpus_per_node (int): Number of gpus used for training in a single
machine.
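    Examples:
        >>> # Illustrative only: on node 0 of a setup with 8 GPUs per node,
        >>> # this creates a group containing global ranks 0-7.
        >>> init_local_group(0, 8)  # doctest: +SKIP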
""" # noqa: W501
global _LOCAL_PROCESS_GROUP
assert _LOCAL_PROCESS_GROUP is None
ranks = list(
range(node_rank * num_gpus_per_node,
(node_rank + 1) * num_gpus_per_node))
_LOCAL_PROCESS_GROUP = torch_dist.new_group(ranks)
def get_backend(group: Optional[ProcessGroup] = None) -> Optional[str]:
"""Return the backend of the given process group.
Note:
Calling ``get_backend`` in non-distributed environment will return
None.
Args:
group (ProcessGroup, optional): The process group to work on. The
default is the general main process group. If another specific
group is specified, the calling process must be part of
:attr:`group`. Defaults to None.
Returns:
str or None: Return the backend of the given process group as a lower
case string if in distributed environment, otherwise None.
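    Examples:
        >>> # Returns the backend name, e.g. 'nccl' or 'gloo', when
        >>> # distributed training has been initialized; otherwise None.
        >>> get_backend()  # doctest: +SKIP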
"""
if is_distributed():
# handle low versions of torch like 1.5.0 which does not support
# passing in None for group argument
if group is None:
group = get_default_group()
return torch_dist.get_backend(group)
else:
return None
def get_world_size(group: Optional[ProcessGroup] = None) -> int:
"""Return the number of the given process group.
Note:
Calling ``get_world_size`` in non-distributed environment will return
1.
Args:
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used. Defaults to None.
Returns:
int: Return the number of processes of the given process group if in
distributed environment, otherwise 1.
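    Examples:
        >>> # In a non-distributed environment the world size is always 1.
        >>> get_world_size()
        1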
"""
if is_distributed():
# handle low versions of torch like 1.5.0 which does not support
# passing in None for group argument
if group is None:
group = get_default_group()
return torch_dist.get_world_size(group)
else:
return 1
def get_rank(group: Optional[ProcessGroup] = None) -> int:
"""Return the rank of the given process group.
Rank is a unique identifier assigned to each process within a distributed
    process group. They are always consecutive integers ranging from 0 to
    ``world_size - 1``.
Note:
Calling ``get_rank`` in non-distributed environment will return 0.
Args:
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used. Defaults to None.
Returns:
int: Return the rank of the process group if in distributed
environment, otherwise 0.
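    Examples:
        >>> # In a non-distributed environment the rank is always 0.
        >>> get_rank()
        0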
"""
if is_distributed():
# handle low versions of torch like 1.5.0 which does not support
# passing in None for group argument
if group is None:
group = get_default_group()
return torch_dist.get_rank(group)
else:
return 0
def get_local_size() -> int:
"""Return the number of the current node.
Returns:
int: Return the number of processes in the current node if in
distributed environment, otherwise 1.
"""
if not is_distributed():
return 1
if _LOCAL_PROCESS_GROUP is None:
raise RuntimeError('Local process group is not created, please use '
'`init_local_group` to setup local process group.')
return torch_dist.get_world_size(_LOCAL_PROCESS_GROUP)
def get_local_rank() -> int:
"""Return the rank of current process in the current node.
Returns:
        int: Return the rank of the current process in the current node if in
            distributed environment, otherwise 0.
"""
if not is_distributed():
return 0
if _LOCAL_PROCESS_GROUP is None:
raise RuntimeError('Local process group is not created, please use '
'`init_local_group` to setup local process group.')
return torch_dist.get_rank(_LOCAL_PROCESS_GROUP)
def get_dist_info(group: Optional[ProcessGroup] = None) -> Tuple[int, int]:
"""Get distributed information of the given process group.
Note:
Calling ``get_dist_info`` in non-distributed environment will return
(0, 1).
Args:
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used. Defaults to None.
Returns:
tuple[int, int]: Return a tuple containing the ``rank`` and
``world_size``.
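    Examples:
        >>> # In a non-distributed environment this is always (0, 1).
        >>> get_dist_info()
        (0, 1)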
"""
world_size = get_world_size(group)
rank = get_rank(group)
return rank, world_size
def is_main_process(group: Optional[ProcessGroup] = None) -> bool:
"""Whether the current rank of the given process group is equal to 0.
Args:
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used. Defaults to None.
Returns:
bool: Return True if the current rank of the given process group is
equal to 0, otherwise False.
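    Examples:
        >>> # In a non-distributed environment there is only rank 0, so this
        >>> # always returns True.
        >>> is_main_process()
        True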
"""
return get_rank(group) == 0
def master_only(func: Callable) -> Callable:
"""Decorate those methods which should be executed in master process.
Args:
func (callable): Function to be decorated.
Returns:
callable: Return decorated function.
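    Examples:
        >>> # `log_message` is a hypothetical function used only to
        >>> # illustrate the decorator; on non-master ranks the wrapped
        >>> # function is skipped and ``None`` is returned.
        >>> @master_only
        ... def log_message(msg):
        ...     print(msg)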
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
if is_main_process():
return func(*args, **kwargs)
return wrapper
def barrier(group: Optional[ProcessGroup] = None) -> None:
"""Synchronize all processes from the given process group.
This collective blocks processes until the whole group enters this
function.
Note:
Calling ``barrier`` in non-distributed environment will do nothing.
Args:
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used. Defaults to None.
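    Examples:
        >>> # Illustrative only: every rank in the group must reach this
        >>> # call, otherwise the other processes will block here.
        >>> barrier()  # doctest: +SKIP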
"""
if is_distributed():
# handle low versions of torch like 1.5.0 which does not support
# passing in None for group argument
if group is None:
group = get_default_group()
torch_dist.barrier(group)
def get_data_device(data: Union[Tensor, Mapping, Iterable]) -> torch.device:
"""Return the device of ``data``.
    If ``data`` is a sequence of Tensor, all items in ``data`` should have the
    same device type.
    If ``data`` is a dict whose values are Tensor, all values should have the
    same device type.
Args:
        data (Tensor or Sequence or dict): Inputs whose device will be
            inferred.
Returns:
torch.device: The device of ``data``.
Examples:
>>> import torch
>>> from mmengine.dist import cast_data_device
>>> # data is a Tensor
>>> data = torch.tensor([0, 1])
>>> get_data_device(data)
device(type='cpu')
>>> # data is a list of Tensor
>>> data = [torch.tensor([0, 1]), torch.tensor([2, 3])]
>>> get_data_device(data)
device(type='cpu')
>>> # data is a dict
>>> data = {'key1': torch.tensor([0, 1]), 'key2': torch.tensor([0, 1])}
>>> get_data_device(data)
device(type='cpu')
"""
if isinstance(data, Tensor):
return data.device
elif isinstance(data, Mapping):
pre = None
for v in data.values():
cur = get_data_device(v)
if pre is None:
pre = cur
else:
if cur != pre:
raise ValueError(
'device type in data should be consistent, but got '
f'{cur} and {pre}')
if pre is None:
raise ValueError('data should not be empty.')
return pre
elif isinstance(data, Iterable) and not isinstance(data, str):
pre = None
for item in data:
cur = get_data_device(item)
if pre is None:
pre = cur
else:
if cur != pre:
raise ValueError(
'device type in data should be consistent, but got '
f'{cur} and {pre}')
if pre is None:
raise ValueError('data should not be empty.')
return pre
else:
raise TypeError('data should be a Tensor, sequence of tensor or dict, '
f'but got {data}')
def get_comm_device(group: Optional[ProcessGroup] = None) -> torch.device:
"""Return the device for communication among groups.
Args:
group (ProcessGroup, optional): The process group to work on.
Returns:
torch.device: The device of backend.
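    Examples:
        >>> # With the NCCL backend this is the current CUDA device, e.g.
        >>> # device(type='cuda', index=0); with GLOO or MPI it is the CPU.
        >>> get_comm_device()  # doctest: +SKIP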
"""
backend = get_backend(group)
if backend == 'hccl':
import torch_npu # noqa: F401
return torch.device('npu', torch.npu.current_device())
elif backend == torch_dist.Backend.NCCL:
return torch.device('cuda', torch.cuda.current_device())
elif backend == 'cncl':
import torch_mlu # noqa: F401
return torch.device('mlu', torch.mlu.current_device())
elif backend == 'smddp':
return torch.device('cuda', torch.cuda.current_device())
else:
# GLOO and MPI backends use cpu device by default
return torch.device('cpu')
def cast_data_device(
data: Union[Tensor, Mapping, Iterable],
device: torch.device,
out: Optional[Union[Tensor, Mapping, Iterable]] = None
) -> Union[Tensor, Mapping, Iterable]:
"""Recursively convert Tensor in ``data`` to ``device``.
    If ``data`` is already on the ``device``, it will not be cast again.
Args:
data (Tensor or list or dict): Inputs to be casted.
device (torch.device): Destination device type.
        out (Tensor or list or dict, optional): If ``out`` is specified, the
            converted data will also be copied into it in place. Defaults to
            None.
Returns:
        Tensor or list or dict: ``data`` cast to ``device``.
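    Examples:
        >>> import torch
        >>> # Illustrative only: moving a dict of CPU tensors to CUDA
        >>> # assumes a GPU is available.
        >>> data = {'x': torch.tensor([0, 1]), 'y': torch.tensor([2, 3])}
        >>> cast_data_device(data, torch.device('cuda'))  # doctest: +SKIP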
"""
if out is not None:
        if type(data) != type(out):
            raise TypeError(
                'out should be the same type as data, but got data is '
                f'{type(data)} and out is {type(out)}')
if isinstance(out, set):
raise TypeError('out should not be a set')
if isinstance(data, Tensor):
if get_data_device(data) == device:
data_on_device = data
else:
data_on_device = data.to(device)
if out is not None:
# modify the value of out inplace
out.copy_(data_on_device) # type: ignore
return data_on_device
elif isinstance(data, Mapping):
data_on_device = {}
if out is not None:
data_len = len(data)
out_len = len(out) # type: ignore
if data_len != out_len:
raise ValueError('length of data and out should be same, '
f'but got {data_len} and {out_len}')
for k, v in data.items():
data_on_device[k] = cast_data_device(v, device,
out[k]) # type: ignore
else:
for k, v in data.items():
data_on_device[k] = cast_data_device(v, device)
if len(data_on_device) == 0:
raise ValueError('data should not be empty')
# To ensure the type of output as same as input, we use `type(data)`
# to wrap the output
return type(data)(data_on_device) # type: ignore
elif isinstance(data, Iterable) and not isinstance(
data, str) and not isinstance(data, np.ndarray):
data_on_device = []
if out is not None:
for v1, v2 in zip(data, out):
data_on_device.append(cast_data_device(v1, device, v2))
else:
for v in data:
data_on_device.append(cast_data_device(v, device))
if len(data_on_device) == 0:
raise ValueError('data should not be empty')
return type(data)(data_on_device) # type: ignore
else:
raise TypeError('data should be a Tensor, list of tensor or dict, '
f'but got {data}')