library (stringclasses, 1 value) | test_file (stringclasses, 785 values) | test_function (stringlengths 1–295) | before (stringlengths 0–448k) | after (stringlengths 0–487k) | context_before (stringclasses, 947 values) | context_after (stringlengths 0–16.3k) | commit_before (stringclasses, 1 value) | commit_after (stringclasses, 1 value) | change_type (stringclasses, 3 values) |
---|---|---|---|---|---|---|---|---|---|
torch
|
test/distributed/_composable/fully_shard/test_fully_shard_init.py
|
test_raise_scalar_parameter
|
if __name__ == "__main__":
run_tests()
|
def test_raise_scalar_parameter(self):
"""Tests raising an exception when the model has scalar parameters."""
device = torch.device("cuda")
model = CompositeParamModel(device=device)
model.register_parameter("scalar_p", nn.Parameter(torch.tensor(1.0).cuda()))
with self.assertRaisesRegex(
ValueError, "Change scalar_p to a 1D tensor with numel equal to 1."
):
fully_shard(model)
|
import copy
import sys
from typing import Optional
import torch
import torch.distributed as dist
import torch.distributed.fsdp._traversal_utils as traversal_utils
import torch.nn as nn
from torch.distributed._composable import fully_shard
from torch.distributed.fsdp import BackwardPrefetch, FullyShardedDataParallel as FSDP
from torch.distributed.fsdp._common_utils import _is_fsdp_flattened, clean_tensor_name
from torch.distributed.fsdp.wrap import _Policy, CustomPolicy, ModuleWrapPolicy
from torch.testing._internal.common_dist_composable import (
CompositeParamModel,
FakeSequential,
NestedSequentialModel,
UnitModule,
)
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import FSDPTest
from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
class TestInitialization(FSDPTest):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/distributed/_composable/fully_shard/test_fully_shard_model_checkpoint.py
|
test_state_dict_save_load_flow
|
def test_state_dict_save_load_flow(self):
"""
E2E test of save + load with rank0_only + CPU offload for TransformerWithSharedParams
on the composable path.
"""
self.run_subtests(
{"ignore_modules": [False, True]},
self._test_save_dict_save_load_flow,
)
|
def test_state_dict_save_load_flow(self):
"""
E2E test of save + load with rank0_only + CPU offload for TransformerWithSharedParams
on the composable path.
"""
self.run_subtests(
{"ignore_modules": [False, True], "sharded_state_dict": [False, True]},
self._test_save_dict_save_load_flow,
)
|
import copy
import itertools
import sys
from typing import Dict
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.distributed._composable import fully_shard
from torch.distributed.fsdp.wrap import ModuleWrapPolicy
from torch.nn import TransformerDecoderLayer, TransformerEncoderLayer
from torch.testing._internal.common_dist_composable import (
CompositeParamModel,
UnitModule,
)
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import (
_zero_model,
CUDAInitMode,
FSDPInitMode,
FSDPTest,
TransformerWithSharedParams,
)
from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
class TestModelCheckpointing(FSDPTest):
|
import copy
import itertools
import sys
from typing import Dict
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.distributed._composable import fully_shard
from torch.distributed._state_dict_utils import _gather_state_dict
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, StateDictType
from torch.distributed.fsdp.api import ShardingStrategy
from torch.distributed.fsdp.wrap import ModuleWrapPolicy
from torch.nn import TransformerDecoderLayer, TransformerEncoderLayer
from torch.testing._internal.common_dist_composable import (
CompositeParamModel,
UnitModule,
)
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import (
_zero_model,
CUDAInitMode,
FSDPInitMode,
FSDPTest,
TransformerWithSharedParams,
)
from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
class TestModelCheckpointing(FSDPTest):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/_composable/fully_shard/test_fully_shard_model_checkpoint.py
|
test_full_state_dict_save_load_mixed_sharding
|
def test_full_state_dict_save_load_mixed_sharding(self):
"""
Tests that the full state dict saved from a module with ``fully_shard``
and ``no_shard`` applied on the module matches that of an equivalent
local module. Also ensures that this state_dict can be reloaded into
a composable module and is equivalent to the original composable module.
"""
local_model = CompositeParamModel(device=torch.device("cuda"))
def _create_mixed_shard_on_model(mod: nn.Module):
fully_shard(mod.u1)
fully_shard(mod, strategy=ShardingStrategy.NO_SHARD)
return mod
save_composable = copy.deepcopy(local_model)
save_composable = _create_mixed_shard_on_model(save_composable)
local_sd = local_model.state_dict()
composable_sd = save_composable.state_dict()
self._check_state_dict_parity(local_sd, composable_sd)
# Validate load
load_composable = copy.deepcopy(local_model)
load_composable = _create_mixed_shard_on_model(load_composable)
_zero_model(load_composable, summon_full=False)
for p in load_composable.parameters():
self.assertEqual(0, p.sum())
sd = {k: v.clone() for k, v in composable_sd.items()}
load_composable.load_state_dict(sd)
self._check_model_parity(load_composable, save_composable)
|
import copy
import itertools
import sys
from typing import Dict
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.distributed._composable import fully_shard
from torch.distributed._state_dict_utils import _gather_state_dict
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, StateDictType
from torch.distributed.fsdp.api import ShardingStrategy
from torch.distributed.fsdp.wrap import ModuleWrapPolicy
from torch.nn import TransformerDecoderLayer, TransformerEncoderLayer
from torch.testing._internal.common_dist_composable import (
CompositeParamModel,
UnitModule,
)
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import (
_zero_model,
CUDAInitMode,
FSDPInitMode,
FSDPTest,
TransformerWithSharedParams,
)
from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
class TestModelCheckpointing(FSDPTest):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_composable/fully_shard/test_fully_shard_model_checkpoint.py
|
_create_mixed_shard_on_model
|
def _create_mixed_shard_on_model(mod: nn.Module):
fully_shard(mod.u1)
fully_shard(mod, strategy=ShardingStrategy.NO_SHARD)
return mod
save_composable = copy.deepcopy(local_model)
save_composable = _create_mixed_shard_on_model(save_composable)
local_sd = local_model.state_dict()
composable_sd = save_composable.state_dict()
self._check_state_dict_parity(local_sd, composable_sd)
# Validate load
load_composable = copy.deepcopy(local_model)
load_composable = _create_mixed_shard_on_model(load_composable)
_zero_model(load_composable, summon_full=False)
for p in load_composable.parameters():
self.assertEqual(0, p.sum())
sd = {k: v.clone() for k, v in composable_sd.items()}
load_composable.load_state_dict(sd)
self._check_model_parity(load_composable, save_composable)
|
import copy
import itertools
import sys
from typing import Dict
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.distributed._composable import fully_shard
from torch.distributed._state_dict_utils import _gather_state_dict
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, StateDictType
from torch.distributed.fsdp.api import ShardingStrategy
from torch.distributed.fsdp.wrap import ModuleWrapPolicy
from torch.nn import TransformerDecoderLayer, TransformerEncoderLayer
from torch.testing._internal.common_dist_composable import (
CompositeParamModel,
UnitModule,
)
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import (
_zero_model,
CUDAInitMode,
FSDPInitMode,
FSDPTest,
TransformerWithSharedParams,
)
from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_composable/fully_shard/test_fully_shard_optim_checkpoint.py
|
test_optim_state_dict_submodule_fully_shard
|
def test_optim_state_dict_submodule_fully_shard(self):
orig_model = CompositeParamModel(device=torch.device("cuda"))
composable_model = copy.deepcopy(orig_model)
fully_shard(composable_model.u1)
fully_shard(composable_model.u2)
composable_optim = torch.optim.Adam(composable_model.parameters(), lr=1e-2)
orig_model = FSDP(orig_model)
orig_optim = torch.optim.Adam(orig_model.parameters(), lr=1e-2)
self._test_optim_state_save_load(
orig_model, orig_optim, composable_model, composable_optim
)
|
def test_optim_state_dict_submodule_fully_shard(self):
orig_model = CompositeParamModel(device=torch.device("cuda"))
composable_model = copy.deepcopy(orig_model)
fully_shard(composable_model.u1)
fully_shard(composable_model.u2)
composable_optim = _optim_cls(composable_model.parameters(), lr=_optim_lr)
orig_model = FSDP(orig_model)
orig_optim = _optim_cls(orig_model.parameters(), lr=_optim_lr)
self._test_optim_state_save_load(
orig_model, orig_optim, composable_model, composable_optim
)
|
import copy
import itertools
import sys
import torch
import torch.distributed as dist
from torch.distributed._composable import fully_shard
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.wrap import ModuleWrapPolicy
from torch.testing._internal.common_dist_composable import (
CompositeParamModel,
UnitModule,
)
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import FSDPTest
from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
class TestOptimStateCheckpointing(FSDPTest):
|
import copy
import itertools
import sys
import torch
import torch.distributed as dist
from torch.distributed._composable import fully_shard
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.wrap import ModuleWrapPolicy
from torch.testing._internal.common_dist_composable import (
CompositeParamModel,
UnitModule,
)
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import FSDPTest
from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
_optim_cls = torch.optim.Adam
_optim_lr = 1e-2
class TestOptimStateCheckpointing(FSDPTest):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/_composable/fully_shard/test_fully_shard_runtime.py
|
test_training
|
def test_training(self):
"""Tests training (forward, backward, optimizer)."""
self.run_subtests(
{
"fsdp_wrap_mode": [
FSDPWrapMode.AUTO_WRAP,
FSDPWrapMode.MANUAL_WRAP,
]
},
self._test_training,
)
|
def test_training(self):
"""Tests training (forward, backward, optimizer)."""
self.run_subtests(
{
"fsdp_wrap_mode": [
FSDPWrapMode.AUTO_WRAP,
FSDPWrapMode.MANUAL_WRAP,
],
"sharding_strategy": [
ShardingStrategy.FULL_SHARD,
ShardingStrategy.SHARD_GRAD_OP,
ShardingStrategy.NO_SHARD,
ShardingStrategy.HYBRID_SHARD,
],
},
self._test_training,
)
|
import contextlib
import copy
import functools
import sys
from enum import auto, Enum
from typing import Callable, Iterable, List, Tuple
import torch
import torch.distributed as dist
import torch.distributed.fsdp._traversal_utils as traversal_utils
import torch.nn as nn
from torch.distributed._composable import fully_shard
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp._common_utils import _FSDPState
from torch.distributed.fsdp.flat_param import _HandlesKey, FlatParamHandle
from torch.distributed.fsdp.wrap import ModuleWrapPolicy
from torch.testing._internal.common_dist_composable import (
CompositeParamModel,
UnitModule,
)
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import FSDPTest
from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
class TestRuntime(FSDPTest):
|
import contextlib
import copy
import functools
import sys
from enum import auto, Enum
from typing import Callable, List, Tuple
import torch
import torch.distributed as dist
import torch.distributed.fsdp._traversal_utils as traversal_utils
import torch.nn as nn
from torch.distributed._composable import fully_shard
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, ShardingStrategy
from torch.distributed.fsdp._common_utils import _FSDPState
from torch.distributed.fsdp._flat_param import FlatParamHandle
from torch.distributed.fsdp.wrap import ModuleWrapPolicy
from torch.testing._internal.common_dist_composable import (
CompositeParamModel,
UnitModule,
)
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import FSDPTest
from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
class TestRuntime(FSDPTest):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/_composable/test_composability/test_2d_composability.py
|
test_2d_optim_state_dict
|
def test_2d_optim_state_dict(self, is_even_sharded_model):
simple_model = SimpleModel if is_even_sharded_model else SimpleModelUneven
# Create a model without wrapper
torch.manual_seed(0)
no_wrap_model = simple_model().cuda(self.rank)
no_wrap_state_dict = no_wrap_model.state_dict()
no_wrap_optim = torch.optim.Adam(no_wrap_model.parameters(), lr=0.01)
no_wrap_model(no_wrap_model.get_input().cuda(self.rank)).sum().backward()
no_wrap_optim.step()
no_wrap_osd = get_optimizer_state_dict(no_wrap_model, optimizers=no_wrap_optim)
# Create a model and sharded it with 2D FSDP + TP
torch.manual_seed(0)
mesh_2d = init_device_mesh(
self.device_type, (2, self.world_size // 2), mesh_dim_names=("dp", "tp")
)
parallelize_plan = {
"net1": ColwiseParallel(),
"net2": RowwiseParallel(),
}
model_2d = parallelize_module(
simple_model().cuda(), mesh_2d["tp"], parallelize_plan
)
model_2d = FSDP(model_2d, device_mesh=mesh_2d["dp"], use_orig_params=True)
FSDP.set_state_dict_type(
model_2d,
StateDictType.SHARDED_STATE_DICT,
)
optim_2d = torch.optim.Adam(model_2d.parameters(), lr=0.01)
model_2d(model_2d.get_input().cuda(self.rank)).sum().backward()
optim_2d.step()
optim_2d_osd = get_optimizer_state_dict(model_2d, optimizers=optim_2d)
ref_optim_2d_osd = deepcopy(optim_2d_osd)
no_wrap_osd_states = no_wrap_osd["state"]
optim_2d_osd_states = optim_2d_osd["state"]
self.assertEqual(len(no_wrap_osd_states), len(optim_2d_osd_states))
self.assertEqual(no_wrap_osd_states.keys(), optim_2d_osd_states.keys())
for fqn, states in no_wrap_osd_states.items():
dist_states = optim_2d_osd_states.get(fqn)
for state_name, state in states.items():
dist_state = dist_states.get(state_name)
# If a state is DTensor, we all gather it in both DP and TP dimension to
# compare with no_wrap state.
if isinstance(dist_state, DTensor):
dist_state = (
dist_state.cuda()
.redistribute(placements=(Replicate(), Replicate()))
.to_local()
)
self.assertTrue(isinstance(dist_state, torch.Tensor))
self.assertTrue(torch.allclose(state, dist_state))
# Update the parameters 2d optim states will be different from ref_optim_state_dict.
model_2d(model_2d.get_input().cuda(self.rank)).sum().backward()
optim_2d.step()
set_optimizer_state_dict(
model_2d, optimizers=optim_2d, optim_state_dict=ref_optim_2d_osd
)
new_optim_2d_osd = get_optimizer_state_dict(model_2d, optimizers=optim_2d)
ref_optim_2d_osd_states = ref_optim_2d_osd["state"]
new_optim_2d_osd_states = optim_2d_osd["state"]
# Compare the new optim state dict after load with the reference one
self.assertEqual(len(ref_optim_2d_osd_states), len(new_optim_2d_osd_states))
self.assertEqual(ref_optim_2d_osd_states.keys(), new_optim_2d_osd_states.keys())
for fqn, states in ref_optim_2d_osd_states.items():
new_states = new_optim_2d_osd_states.get(fqn)
for state_name, state in states.items():
new_state = new_states.get(state_name)
if isinstance(new_state, DTensor):
self.assertEqual(new_state.placements, state.placements)
self.assertEqual(new_state.device_mesh, state.device_mesh)
self.assertTrue(
torch.allclose(new_state.to_local(), state.to_local())
)
else:
self.assertEqual(new_state, state)
|
import copy
import functools
import io
from copy import deepcopy
from typing import List, Type
import torch
import torch.distributed as dist
import torch.distributed.checkpoint as dcp
import torch.nn as nn
import torch.nn.functional as F
from torch.distributed._composable import replicate
from torch.distributed._composable.fsdp import CPUOffloadPolicy, fully_shard
from torch.distributed._tensor import DTensor, init_device_mesh, Replicate, Shard
from torch.distributed.checkpoint.state_dict import (
get_model_state_dict,
get_optimizer_state_dict,
set_model_state_dict,
set_optimizer_state_dict,
StateDictOptions,
)
from torch.distributed.device_mesh import DeviceMesh
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp._common_utils import (
_get_module_fsdp_state,
clean_tensor_name,
)
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
from torch.distributed.tensor.debug import CommDebugMode
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.distributed.tensor.parallel.ddp import _pre_dp_module_transform
from torch.distributed.tensor.parallel.fsdp import DTensorExtensions
from torch.distributed.tensor.parallel.input_reshard import input_reshard
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import FSDPTest, MLP, MLPStack
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
skipIfRocm,
)
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
MLPModule,
ModelArgs,
Transformer,
with_comms,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
class TestNew2dParallelStateDict(DTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_composable/test_composability/test_pp_composability.py
|
__init__
|
def __init__(self, d_hid: int):
super().__init__()
self.net1 = torch.nn.Linear(d_hid, d_hid)
self.relu = torch.nn.ReLU()
self.net2 = torch.nn.Linear(d_hid, d_hid)
|
import copy
import os
import torch
import torch.nn as nn
from torch.distributed._composable.fsdp.fully_shard import (
fully_shard,
MixedPrecisionPolicy,
)
from torch.distributed._tensor import DTensor
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.pipelining import PipelineStage
from torch.distributed.pipelining.schedules import (
PipelineScheduleSingle,
Schedule1F1B,
ScheduleFlexibleInterleaved1F1B,
ScheduleGPipe,
ScheduleInterleaved1F1B,
ScheduleInterleavedZeroBubble,
ScheduleLoopedBFS,
)
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
skip_but_pass_in_sandcastle_if,
)
class MLPModule(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_composable/test_composability/test_pp_composability.py
|
forward
|
def forward(self, x):
x = self.net1(x)
x = self.relu(x)
x = self.net2(x)
return x
|
import copy
import os
import torch
import torch.nn as nn
from torch.distributed._composable.fsdp.fully_shard import (
fully_shard,
MixedPrecisionPolicy,
)
from torch.distributed._tensor import DTensor
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.pipelining import PipelineStage
from torch.distributed.pipelining.schedules import (
PipelineScheduleSingle,
Schedule1F1B,
ScheduleFlexibleInterleaved1F1B,
ScheduleGPipe,
ScheduleInterleaved1F1B,
ScheduleInterleavedZeroBubble,
ScheduleLoopedBFS,
)
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
skip_but_pass_in_sandcastle_if,
)
class MLPModule(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_composable/test_composability/test_pp_composability.py
|
setUp
|
def setUp(self):
super().setUp()
self._spawn_processes()
|
import copy
import os
import torch
import torch.nn as nn
from torch.distributed._composable.fsdp.fully_shard import (
fully_shard,
MixedPrecisionPolicy,
)
from torch.distributed._tensor import DTensor
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.pipelining import PipelineStage
from torch.distributed.pipelining.schedules import (
PipelineScheduleSingle,
Schedule1F1B,
ScheduleFlexibleInterleaved1F1B,
ScheduleGPipe,
ScheduleInterleaved1F1B,
ScheduleInterleavedZeroBubble,
ScheduleLoopedBFS,
)
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
skip_but_pass_in_sandcastle_if,
)
class ComposabilityTest(MultiProcessTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_composable/test_composability/test_2d_composability.py
|
test_fully_shard_tp_2d_set_full_state_dict
|
def test_fully_shard_tp_2d_set_full_state_dict(self):
dummy_model = SimpleModel().cuda()
mesh_2d = init_device_mesh(
"cuda",
(2, self.world_size // 2),
mesh_dim_names=("dp", "tp"),
)
tp_mesh = mesh_2d["tp"]
dp_mesh = mesh_2d["dp"]
parallelize_plan = {
"net1": ColwiseParallel(),
"net2": RowwiseParallel(),
"net3": ColwiseParallel(),
}
model = parallelize_module(dummy_model, tp_mesh, parallelize_plan)
fully_shard(model, mesh=dp_mesh)
optim = torch.optim.Adam(model.parameters(), lr=0.01)
model(model.get_input()).sum().backward()
optim.step()
# ref_msd, ref_osd are both the default sharded state dict
ref_msd = copy.deepcopy(get_model_state_dict(model))
ref_osd = copy.deepcopy(get_optimizer_state_dict(model, optimizers=optim))
options = StateDictOptions(
full_state_dict=True, cpu_offload=True, broadcast_from_rank0=True
)
full_msd = get_model_state_dict(model, options=options)
full_osd = get_optimizer_state_dict(model, optimizers=optim, options=options)
# load full_msd and full_osd into model and optim.
# this loads the slice of full tensor into each rank's local DTensor.
set_model_state_dict(model, full_msd, options=options)
set_optimizer_state_dict(
model, optimizers=optim, optim_state_dict=full_osd, options=options
)
# check after setting full state dict, the model and optim default sharded state dict
# are the same as the initial default sharded state dict.
new_msd = get_model_state_dict(model)
new_osd = get_optimizer_state_dict(model, optimizers=optim)
self.assertEqual(ref_msd, new_msd)
self.assertEqual(ref_osd, new_osd)
|
import copy
import functools
import io
from copy import deepcopy
from typing import List, Type
import torch
import torch.distributed as dist
import torch.distributed.checkpoint as dcp
import torch.nn as nn
import torch.nn.functional as F
from torch.distributed._composable import replicate
from torch.distributed._composable.fsdp import CPUOffloadPolicy, fully_shard
from torch.distributed._tensor import DTensor, init_device_mesh, Replicate, Shard
from torch.distributed.checkpoint.state_dict import (
get_model_state_dict,
get_optimizer_state_dict,
set_model_state_dict,
set_optimizer_state_dict,
StateDictOptions,
)
from torch.distributed.device_mesh import DeviceMesh
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp._common_utils import (
_get_module_fsdp_state,
clean_tensor_name,
)
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
from torch.distributed.tensor.debug import CommDebugMode
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.distributed.tensor.parallel.ddp import _pre_dp_module_transform
from torch.distributed.tensor.parallel.fsdp import DTensorExtensions
from torch.distributed.tensor.parallel.input_reshard import input_reshard
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import FSDPTest, MLP, MLPStack
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
skipIfRocm,
)
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
MLPModule,
ModelArgs,
Transformer,
with_comms,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
class TestFullyShard2DStateDict(DTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_composable/test_composability/test_2d_composability.py
|
init_model
|
def init_model(self, device_type, model_parallel_size=2):
torch.manual_seed(0)
model = MLPModule(device_type)
torch.manual_seed(0)
twod_model = MLPModule(device_type)
model = DDP(model)
# 2-D mesh is [dp, tp]
world_size = dist.get_world_size()
mesh_2d = init_device_mesh(
device_type,
(world_size // model_parallel_size, model_parallel_size),
mesh_dim_names=("dp", "tp"),
)
dp_pg = mesh_2d.get_group(mesh_dim=0)
parallelize_plan = {
"net1": ColwiseParallel(),
"net2": RowwiseParallel(),
}
twod_model = parallelize_module(twod_model, mesh_2d["tp"], parallelize_plan)
_pre_dp_module_transform(twod_model)
# TODO: Add tests when using gradient_as_bucket_view and static_graph for DDP.
twod_model = DDP(twod_model, process_group=dp_pg)
return model, twod_model, dp_pg
|
import copy
import functools
import io
from copy import deepcopy
from typing import List, Type
import torch
import torch.distributed as dist
import torch.distributed.checkpoint as dcp
import torch.nn as nn
import torch.nn.functional as F
from torch.distributed._composable import replicate
from torch.distributed._composable.fsdp import CPUOffloadPolicy, fully_shard
from torch.distributed._tensor import DTensor, init_device_mesh, Replicate, Shard
from torch.distributed.checkpoint.state_dict import (
get_model_state_dict,
get_optimizer_state_dict,
set_model_state_dict,
set_optimizer_state_dict,
StateDictOptions,
)
from torch.distributed.device_mesh import DeviceMesh
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp._common_utils import (
_get_module_fsdp_state,
clean_tensor_name,
)
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
from torch.distributed.tensor.debug import CommDebugMode
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.distributed.tensor.parallel.ddp import _pre_dp_module_transform
from torch.distributed.tensor.parallel.fsdp import DTensorExtensions
from torch.distributed.tensor.parallel.input_reshard import input_reshard
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import FSDPTest, MLP, MLPStack
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
skipIfRocm,
)
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
MLPModule,
ModelArgs,
Transformer,
with_comms,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
class Test2dFSDP1ParallelIntegration(DTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_composable/test_composability/test_2d_composability.py
|
test_2d_fsdp_state_enable_extension
|
def test_2d_fsdp_state_enable_extension(self):
mesh_2d = init_device_mesh(
self.device_type, (2, self.world_size // 2), mesh_dim_names=("dp", "tp")
)
model = FSDP(
SimpleModel().cuda(),
device_mesh=mesh_2d["dp"],
)
fsdp_state = _get_module_fsdp_state(model)
self.assertTrue(isinstance(fsdp_state._fsdp_extension, DTensorExtensions))
|
import copy
import functools
import io
from copy import deepcopy
from typing import List, Type
import torch
import torch.distributed as dist
import torch.distributed.checkpoint as dcp
import torch.nn as nn
import torch.nn.functional as F
from torch.distributed._composable import replicate
from torch.distributed._composable.fsdp import CPUOffloadPolicy, fully_shard
from torch.distributed._tensor import DTensor, init_device_mesh, Replicate, Shard
from torch.distributed.checkpoint.state_dict import (
get_model_state_dict,
get_optimizer_state_dict,
set_model_state_dict,
set_optimizer_state_dict,
StateDictOptions,
)
from torch.distributed.device_mesh import DeviceMesh
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp._common_utils import (
_get_module_fsdp_state,
clean_tensor_name,
)
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
from torch.distributed.tensor.debug import CommDebugMode
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.distributed.tensor.parallel.ddp import _pre_dp_module_transform
from torch.distributed.tensor.parallel.fsdp import DTensorExtensions
from torch.distributed.tensor.parallel.input_reshard import input_reshard
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import FSDPTest, MLP, MLPStack
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
skipIfRocm,
)
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
MLPModule,
ModelArgs,
Transformer,
with_comms,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
class TestNew2dParallelTraining(DTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_composable/test_composability/test_2d_composability.py
|
test_2d_e2e_training_not_use_orig_params
|
def test_2d_e2e_training_not_use_orig_params(self):
# TODO: need to revisit input_reshard API about why it failed multi-gpu tests.
# self._test_2d_e2e_training(recompute_activation=True)
self._test_2d_e2e_training(recompute_activation=False)
# TODO: update all state dict unit tests to use distributed.checkpoint.state_dict,
# and consolidate all the state_dict test in test.distributed.checkpoint.
|
import copy
import functools
import io
from copy import deepcopy
from typing import List, Type
import torch
import torch.distributed as dist
import torch.distributed.checkpoint as dcp
import torch.nn as nn
import torch.nn.functional as F
from torch.distributed._composable import replicate
from torch.distributed._composable.fsdp import CPUOffloadPolicy, fully_shard
from torch.distributed._tensor import DTensor, init_device_mesh, Replicate, Shard
from torch.distributed.checkpoint.state_dict import (
get_model_state_dict,
get_optimizer_state_dict,
set_model_state_dict,
set_optimizer_state_dict,
StateDictOptions,
)
from torch.distributed.device_mesh import DeviceMesh
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp._common_utils import (
_get_module_fsdp_state,
clean_tensor_name,
)
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
from torch.distributed.tensor.debug import CommDebugMode
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.distributed.tensor.parallel.ddp import _pre_dp_module_transform
from torch.distributed.tensor.parallel.fsdp import DTensorExtensions
from torch.distributed.tensor.parallel.input_reshard import input_reshard
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import FSDPTest, MLP, MLPStack
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
skipIfRocm,
)
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
MLPModule,
ModelArgs,
Transformer,
with_comms,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
class TestNew2dParallelTraining(DTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_composable/test_replicate_with_compiler.py
|
__init__
|
def __init__(self, checkpoint=False):
super().__init__()
self.fc1 = nn.Linear(DIM, DIM)
self.fc2 = nn.Linear(DIM, DIM)
self.fc3 = nn.Linear(DIM, DIM)
self.fc4 = nn.Linear(DIM, DIM)
self.use_checkpoint = checkpoint
|
import contextlib
import functools
import os
import unittest
from copy import deepcopy
from typing import Callable, Optional
import torch
import torch.distributed as dist
from torch import _inductor as inductor, nn
from torch._C import FileCheck
from torch._dynamo import compiled_autograd
from torch._dynamo.utils import counters
from torch._inductor.test_case import TestCase as InductorTestCase
from torch._inductor.utils import run_and_get_triton_code
from torch.distributed._composable.replicate import replicate
from torch.distributed.algorithms.ddp_comm_hooks import (
default_hooks as ddp_default_hooks,
)
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.nn.parallel.distributed import DistributedDataParallel as DDP
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
)
from torch.testing._internal.common_utils import run_tests, skipIfRocm
from torch.testing._internal.distributed.fake_pg import FakeStore
from torch.utils._triton import has_triton
from torch.utils.checkpoint import checkpoint
DIM = 2000
class Net(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_composable/test_replicate_with_compiler.py
|
forward
|
def forward(self, x):
if self.use_checkpoint:
_fc1 = checkpoint(self.fc1, x, use_reentrant=False)
else:
_fc1 = self.fc1(x)
return self.fc4(self.fc3(self.fc2(_fc1)))
|
import contextlib
import functools
import os
import unittest
from copy import deepcopy
from typing import Callable, Optional
import torch
import torch.distributed as dist
from torch import _inductor as inductor, nn
from torch._C import FileCheck
from torch._dynamo import compiled_autograd
from torch._dynamo.utils import counters
from torch._inductor.test_case import TestCase as InductorTestCase
from torch._inductor.utils import run_and_get_triton_code
from torch.distributed._composable.replicate import replicate
from torch.distributed.algorithms.ddp_comm_hooks import (
default_hooks as ddp_default_hooks,
)
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.nn.parallel.distributed import DistributedDataParallel as DDP
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
)
from torch.testing._internal.common_utils import run_tests, skipIfRocm
from torch.testing._internal.distributed.fake_pg import FakeStore
from torch.utils._triton import has_triton
from torch.utils.checkpoint import checkpoint
DIM = 2000
class Net(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_composable/test_replicate_with_compiler.py
|
inner_compiler
|
def inner_compiler(gm_, example_inputs_):
if no_inductor:
return gm_
else:
return inductor.compile(gm_, example_inputs_)
gm = torch.compile(gm, fullgraph=True, backend=inner_compiler)
return gm
|
import contextlib
import functools
import os
import unittest
from copy import deepcopy
from typing import Callable, Optional
import torch
import torch.distributed as dist
from torch import _inductor as inductor, nn
from torch._C import FileCheck
from torch._dynamo import compiled_autograd
from torch._dynamo.utils import counters
from torch._inductor.test_case import TestCase as InductorTestCase
from torch._inductor.utils import run_and_get_triton_code
from torch.distributed._composable.replicate import replicate
from torch.distributed.algorithms.ddp_comm_hooks import (
default_hooks as ddp_default_hooks,
)
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.nn.parallel.distributed import DistributedDataParallel as DDP
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
)
from torch.testing._internal.common_utils import run_tests, skipIfRocm
from torch.testing._internal.distributed.fake_pg import FakeStore
from torch.utils._triton import has_triton
from torch.utils.checkpoint import checkpoint
DIM = 2000
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_composable/test_replicate_with_compiler.py
|
tearDown
|
def tearDown(self):
super().tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
|
import contextlib
import functools
import os
import unittest
from copy import deepcopy
from typing import Callable, Optional
import torch
import torch.distributed as dist
from torch import _inductor as inductor, nn
from torch._C import FileCheck
from torch._dynamo import compiled_autograd
from torch._dynamo.utils import counters
from torch._inductor.test_case import TestCase as InductorTestCase
from torch._inductor.utils import run_and_get_triton_code
from torch.distributed._composable.replicate import replicate
from torch.distributed.algorithms.ddp_comm_hooks import (
default_hooks as ddp_default_hooks,
)
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.nn.parallel.distributed import DistributedDataParallel as DDP
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
)
from torch.testing._internal.common_utils import run_tests, skipIfRocm
from torch.testing._internal.distributed.fake_pg import FakeStore
from torch.utils._triton import has_triton
from torch.utils.checkpoint import checkpoint
DIM = 2000
class ReplicateTest(MultiProcessInductorTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_composable/test_replicate_with_compiler.py
|
test_compile_cpu
|
def test_compile_cpu(self):
# Test the coalesced_op with CPU.
torch._inductor.config._fuse_ddp_communication_passes = [
"fuse_ddp_with_coalesced_op",
"schedule_comm_wait",
]
self._test_compile(use_gpu=False, no_sync=False)
|
import contextlib
import functools
import os
import unittest
from copy import deepcopy
from typing import Callable, Optional
import torch
import torch.distributed as dist
from torch import _inductor as inductor, nn
from torch._C import FileCheck
from torch._dynamo import compiled_autograd
from torch._dynamo.utils import counters
from torch._inductor.test_case import TestCase as InductorTestCase
from torch._inductor.utils import run_and_get_triton_code
from torch.distributed._composable.replicate import replicate
from torch.distributed.algorithms.ddp_comm_hooks import (
default_hooks as ddp_default_hooks,
)
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.nn.parallel.distributed import DistributedDataParallel as DDP
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
)
from torch.testing._internal.common_utils import run_tests, skipIfRocm
from torch.testing._internal.distributed.fake_pg import FakeStore
from torch.utils._triton import has_triton
from torch.utils.checkpoint import checkpoint
DIM = 2000
class ReplicateTest(MultiProcessInductorTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_composable/test_checkpoint.py
|
test_tensor_only_gpu
|
def test_tensor_only_gpu(self, use_reentrant: bool):
x = torch.randn(20, 100, device="cuda:0")
net = ToyModel().to("cuda:0")
self._test_tensor_only(net, x, use_reentrant)
|
def test_tensor_only_gpu(self):
x = torch.randn(20, 100, device="cuda:0")
net = ToyModel().to("cuda:0")
self._test_tensor_only(net, x)
|
import unittest
from collections import deque
from contextlib import ContextDecorator
from copy import deepcopy
import torch
import torch.nn as nn
from torch.distributed._composable import checkpoint
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
TestCase,
)
class TestCheckpoint(TestCase):
|
import unittest
from collections import deque, OrderedDict
from contextlib import ContextDecorator, contextmanager, nullcontext
from copy import deepcopy
from functools import partial
from typing import Tuple
import torch
import torch.nn as nn
from torch.distributed._composable import checkpoint
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import run_tests, TestCase
from torch.utils.checkpoint import CheckpointError
class TestCheckpoint(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/_shard/sharded_tensor/ops/test_binary_cmp.py
|
test_torch_allclose
|
def test_torch_allclose(self):
""" Test torch.allclose(ShardedTensor, ShardedTensor) """
spec, alt_spec = self.get_gpu_specs()
st1, st2 = self.get_random_tensors(spec, spec, 10, 10)
self.assertTrue(torch.allclose(st1, st2))
self.assertTrue(torch.allclose(st1, st2, atol=0))
# compare different arrays
st1, st2 = self.get_random_tensors(spec, spec, 10, 10, seed_offset=1)
self.assertFalse(torch.allclose(st1, st2))
# sharded_tensor.rand produces uniform values in the [0,1] range.
self.assertTrue(torch.allclose(st1, st2, atol=1))
|
def test_torch_allclose(self):
"""Test torch.allclose(ShardedTensor, ShardedTensor)"""
spec, alt_spec = self.get_gpu_specs()
st1, st2 = self.get_random_tensors(spec, spec, 10, 10)
self.assertTrue(torch.allclose(st1, st2))
self.assertTrue(torch.allclose(st1, st2, atol=0))
# compare different arrays
st1, st2 = self.get_random_tensors(spec, spec, 10, 10, seed_offset=1)
self.assertFalse(torch.allclose(st1, st2))
# sharded_tensor.rand produces uniform values in the [0,1] range.
self.assertTrue(torch.allclose(st1, st2, atol=1))
|
import sys
import torch
import torch.distributed as dist
from torch.distributed._shard import sharded_tensor
from torch.distributed.distributed_c10d import _get_default_group
from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.distributed._shard.sharding_spec import (
ChunkShardingSpec,
)
from torch.testing._internal.distributed._shard.sharded_tensor import (
ShardedTensorTestBase,
with_comms,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
run_tests,
)
class TestShardedTensorBinaryOps(ShardedTensorTestBase):
|
import sys
import torch
import torch.distributed as dist
from torch.distributed._shard import sharded_tensor
from torch.distributed._shard.sharding_spec import ChunkShardingSpec
from torch.distributed.distributed_c10d import _get_default_group
from torch.testing._internal.common_distributed import requires_nccl, skip_if_lt_x_gpu
from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
from torch.testing._internal.distributed._shard.sharded_tensor import (
ShardedTensorTestBase,
with_comms,
)
class TestShardedTensorBinaryOps(ShardedTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/_shard/sharded_tensor/ops/test_chunk.py
|
_compare_chunk_result
|
def _compare_chunk_result(self, chunked_list, chunked_st_list):
self.assertEqual(len(chunked_list), len(chunked_st_list))
for idx, chunked_st in enumerate(chunked_st_list):
tensor = chunked_list[idx]
st = _shard_tensor(tensor.contiguous(), chunked_st.sharding_spec())
# _shard_tensor generate sharded tensor with metadata ranked by # of rank.
st._metadata.shards_metadata.sort(
key=lambda x: x.shard_offsets[chunked_st.sharding_spec().dim],
)
self.assertTrue(torch.allclose(chunked_st, st))
|
import sys
import torch
from torch.distributed._shard import sharded_tensor, _shard_tensor
from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
run_tests,
)
from torch.testing._internal.distributed._shard.sharded_tensor import (
TEST_GPU_NUM,
ShardedTensorTestBase,
with_comms,
)
from torch.testing._internal.distributed._shard.sharded_tensor._test_ops_common import (
generate_chunk_sharding_specs_for_test,
generate_enumerable_sharding_specs_for_test,
)
class TestShardedTensorChunkOps(ShardedTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/_shard/sharded_tensor/ops/test_chunk.py
|
test_sharded_chunk_error
|
def test_sharded_chunk_error(self):
chunk_spec = generate_chunk_sharding_specs_for_test(-1)
with self.assertRaisesRegex(
NotImplementedError, "Chunk by sharding dim is not supported."
):
st = sharded_tensor.rand(chunk_spec[0], [17, 24])
torch.chunk(st, 5, dim=-1)
enumerable_spec = generate_enumerable_sharding_specs_for_test()
with self.assertRaisesRegex(
NotImplementedError, "Only ChunkShardingSpec is supported for chunk."
):
st = sharded_tensor.rand(enumerable_spec[0], [10, 10])
torch.chunk(st, 5, dim=-1)
|
import sys
import torch
from torch.distributed._shard import sharded_tensor, _shard_tensor
from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
run_tests,
)
from torch.testing._internal.distributed._shard.sharded_tensor import (
TEST_GPU_NUM,
ShardedTensorTestBase,
with_comms,
)
from torch.testing._internal.distributed._shard.sharded_tensor._test_ops_common import (
generate_chunk_sharding_specs_for_test,
generate_enumerable_sharding_specs_for_test,
)
class TestShardedTensorChunkOps(ShardedTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/_shard/sharded_tensor/ops/test_elementwise_ops.py
|
_reset_random_seed
|
def _reset_random_seed():
torch.manual_seed(self.rank + 4)
specs = generate_chunk_sharding_specs_for_test(
0
) + generate_chunk_sharding_specs_for_test(1)
for spec in specs:
self._run_sharded_elementwise_ops(
spec,
[12, 17],
torch.nn.functional.dropout,
p=0.4,
reset_seed=_reset_random_seed,
)
self._run_sharded_elementwise_ops(
spec,
[18, 21],
torch.nn.functional.dropout,
p=0.5,
reset_seed=_reset_random_seed,
)
_reset_random_seed()
dropout = torch.nn.Dropout(p=0.8)
self._run_sharded_elementwise_ops(
spec, [17, 23], dropout, reset_seed=_reset_random_seed
)
|
import sys
import torch
from torch.distributed._shard import sharded_tensor, _shard_tensor
from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
run_tests,
)
from torch.testing._internal.distributed._shard.sharded_tensor import (
TEST_GPU_NUM,
ShardedTensorTestBase,
with_comms,
)
from torch.testing._internal.distributed._shard.sharded_tensor._test_ops_common import (
generate_chunk_sharding_specs_for_test,
)
from torch.testing._internal.distributed._shard.sharded_tensor._test_st_common import (
_chunk_sharding_specs_list_for_test,
)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/_shard/sharded_tensor/ops/test_elementwise_ops.py
|
test_sharded_tensor_nan_to_num
|
def test_sharded_tensor_nan_to_num(self):
specs = _chunk_sharding_specs_list_for_test([0, 1], seed=10)
for spec in specs:
tensor = torch.rand(16, 12).cuda(self.rank)
tensor[:, :2] = float('nan')
tensor[:, 4:5] = float('inf')
tensor[:, 10:] = -float('inf')
st = _shard_tensor(tensor, spec)
st_expected = _shard_tensor(torch.nan_to_num(tensor), spec)
st = torch.nan_to_num(st)
self.assertTrue(torch.allclose(st, st_expected))
|
import sys
import torch
from torch.distributed._shard import sharded_tensor, _shard_tensor
from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
run_tests,
)
from torch.testing._internal.distributed._shard.sharded_tensor import (
TEST_GPU_NUM,
ShardedTensorTestBase,
with_comms,
)
from torch.testing._internal.distributed._shard.sharded_tensor._test_ops_common import (
generate_chunk_sharding_specs_for_test,
)
from torch.testing._internal.distributed._shard.sharded_tensor._test_st_common import (
_chunk_sharding_specs_list_for_test,
)
class TestShardedTensorElementWiseOps(ShardedTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/_composable/test_checkpoint.py
|
test_multi_args
|
instantiate_parametrized_tests(TestCheckpoint)
|
def test_multi_args(self):
"""
Tests checkpoint for modules with multiple output args and hence
multiple backward function input args.
"""
device = torch.device("cpu")
net1 = nn.Sequential(
MultiOutputModel(device),
MultiInputModel(device),
MultiOutputModel(device),
MultiInputModel(device),
)
net2 = deepcopy(net1)
checkpoint(net2[0])
checkpoint(net2[2])
x1 = torch.randn(20, 100, requires_grad=True)
x2 = x1.clone()
net1(x1).sum().backward()
net2(x2).sum().backward()
for p1, p2 in zip(net1.parameters(), net2.parameters()):
self.assertEqual(p1.grad, p2.grad)
|
import unittest
from collections import deque, OrderedDict
from contextlib import ContextDecorator, contextmanager, nullcontext
from copy import deepcopy
from functools import partial
from typing import Tuple
import torch
import torch.nn as nn
from torch.distributed._composable import checkpoint
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import run_tests, TestCase
from torch.utils.checkpoint import CheckpointError
class TestCheckpoint(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/distributed/_composable/test_checkpoint.py
|
test_clears_state_on_error_in_forward
|
def test_clears_state_on_error_in_forward(self):
class MyModel(torch.nn.Module):
def __init__(self, raise_in_recomp):
super().__init__()
self.fwd_count = 0
self.raise_in_recomp = raise_in_recomp
self.a = torch.nn.Linear(2, 2)
def forward(self, x):
if self.raise_in_recomp and self.fwd_count == 1:
raise RuntimeError("foo")
else:
if not self.raise_in_recomp:
# raise in the first forward
raise RuntimeError("foo")
self.fwd_count += 1
return self.a(x)
m = MyModel(raise_in_recomp=True)
m_seq = torch.nn.Sequential(OrderedDict({"m": m}))
checkpoint(m_seq.m)
inp = torch.randn(1, 2)
out = m_seq(inp).sum()
# Should raise in forward recomputation
with self.assertRaisesRegex(RuntimeError, "foo"):
out.backward()
# Check that _ac_generator is cleared out
self.assertEqual(None, checkpoint.state(m)._ac_generator)
m = MyModel(raise_in_recomp=False)
checkpoint(m)
inp = torch.randn(1, 2)
# Should raise in first forward
with self.assertRaises(RuntimeError):
m(inp)
self.assertEqual(None, checkpoint.state(m)._ac_generator)
|
import unittest
from collections import deque, OrderedDict
from contextlib import ContextDecorator, contextmanager, nullcontext
from copy import deepcopy
from functools import partial
from typing import Tuple
import torch
import torch.nn as nn
from torch.distributed._composable import checkpoint
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import run_tests, TestCase
from torch.utils.checkpoint import CheckpointError
class TestCheckpoint(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_composable/test_checkpoint.py
|
test_checkpoint_kwargs
|
def test_checkpoint_kwargs(self):
class MyModel(torch.nn.Module):
def __init__(self, raise_exp: bool, change_shape_in_recomp: bool):
super().__init__()
self.fwd_count = 0
self.raise_exp = raise_exp
self.change_shape_in_recomp = change_shape_in_recomp
self.a = torch.nn.Linear(2, 2)
def forward(self, x):
if self.raise_exp and self.fwd_count == 0:
raise RuntimeError("foo")
if self.raise_exp and self.fwd_count == 1:
raise RuntimeError("bar")
if self.change_shape_in_recomp and self.fwd_count == 1:
x.relu_()
random_tensor = torch.randn(1, 2)
x = self.a(x + random_tensor)
self.fwd_count += 1
return x
m = MyModel(True, False)
m0, m1, m2, m3 = (deepcopy(m) for _ in range(4))
# composable checkpoint does not support use_reentrant=True
with self.assertRaisesRegex(
NotImplementedError,
"use_reentrant=True is not supported in composable checkpoint. "
"Please use torch.utils.checkpoint.checkpoint instead.",
):
checkpoint(m, use_reentrant=True)
# check giving an unsupported kwarg
with self.assertRaisesRegex(ValueError, "Unexpected keyword arguments: foo"):
checkpoint(m0, foo="bar")
handled_fwd_exp = False
handled_recomp_exp = False
@contextmanager
def fwd_ctx(mod: MyModel):
try:
mod.raise_exp = False
yield
finally:
nonlocal handled_fwd_exp
handled_fwd_exp = True
mod.raise_exp = True
@contextmanager
def recomp_ctx(mod: MyModel):
try:
mod.raise_exp = False
yield
finally:
nonlocal handled_recomp_exp
handled_recomp_exp = True
mod.raise_exp = True
# Test different context functions
x = torch.randn(1, 2, requires_grad=True)
checkpoint(
m1, context_fn=lambda: (partial(fwd_ctx, m1)(), partial(recomp_ctx, m1)())
)
m1(x.clone()).sum().backward()
self.assertEqual((handled_fwd_exp, handled_recomp_exp), (True, True))
checkpoint(m2, context_fn=lambda: (nullcontext(), partial(recomp_ctx, m2)()))
with self.assertRaisesRegex(RuntimeError, "foo"):
m2(x.clone())
handled_fwd_exp = False # Reset flag
checkpoint(m3, context_fn=lambda: (partial(fwd_ctx, m3)(), nullcontext()))
with self.assertRaisesRegex(RuntimeError, "bar"):
m3(x.clone()).sum().backward()
self.assertEqual(handled_fwd_exp, True)
# Test determinism check failure
m4 = MyModel(False, True)
m5 = deepcopy(m4)
# Determinism check should not throw an error,
# but autograd should throw a RuntimeError
checkpoint(m4, determinism_check="none")
with self.assertRaises(RuntimeError):
m4(x.clone()).sum().backward()
# Determinism check should throw a CheckpointError
checkpoint(m5, determinism_check="default")
with self.assertRaises(CheckpointError):
m5(x.clone()).sum().backward()
# Test preserving random state
m6 = MyModel(False, False)
m7, m8 = (deepcopy(m6) for _ in range(2))
checkpoint(m7, preserve_rng_state=False)
checkpoint(m8, preserve_rng_state=True)
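        # m6 is the un-checkpointed baseline; m8 restores the forward RNG state during recomputation and should match m6, while m7 does not and may diverge.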
for mi in (m6, m7, m8):
torch.manual_seed(42)
loss = mi(x.clone()).sum()
torch.manual_seed(41)
loss.backward()
# check that m6 and m7 have at least one different grad
        self.assertNotEqual(
            [p1.grad for p1 in m6.parameters()], [p2.grad for p2 in m7.parameters()]
        )
# check that m6 and m8 have identical grads
for p1, p2 in zip(m6.parameters(), m8.parameters()):
self.assertEqual(p1.grad, p2.grad)
|
import unittest
from collections import deque, OrderedDict
from contextlib import ContextDecorator, contextmanager, nullcontext
from copy import deepcopy
from functools import partial
from typing import Tuple
import torch
import torch.nn as nn
from torch.distributed._composable import checkpoint
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import run_tests, TestCase
from torch.utils.checkpoint import CheckpointError
class TestCheckpoint(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_composable/test_checkpoint.py
|
recomp_ctx
|
if __name__ == "__main__":
run_tests()
|
def recomp_ctx(mod: MyModel):
try:
mod.raise_exp = False
yield
finally:
nonlocal handled_recomp_exp
handled_recomp_exp = True
mod.raise_exp = True
# Test different context functions
x = torch.randn(1, 2, requires_grad=True)
checkpoint(
m1, context_fn=lambda: (partial(fwd_ctx, m1)(), partial(recomp_ctx, m1)())
)
m1(x.clone()).sum().backward()
self.assertEqual((handled_fwd_exp, handled_recomp_exp), (True, True))
checkpoint(m2, context_fn=lambda: (nullcontext(), partial(recomp_ctx, m2)()))
with self.assertRaisesRegex(RuntimeError, "foo"):
m2(x.clone())
handled_fwd_exp = False # Reset flag
checkpoint(m3, context_fn=lambda: (partial(fwd_ctx, m3)(), nullcontext()))
with self.assertRaisesRegex(RuntimeError, "bar"):
m3(x.clone()).sum().backward()
self.assertEqual(handled_fwd_exp, True)
# Test determinism check failure
m4 = MyModel(False, True)
m5 = deepcopy(m4)
# Determinism check should not throw an error,
# but autograd should throw a RuntimeError
checkpoint(m4, determinism_check="none")
with self.assertRaises(RuntimeError):
m4(x.clone()).sum().backward()
# Determinism check should throw a CheckpointError
checkpoint(m5, determinism_check="default")
with self.assertRaises(CheckpointError):
m5(x.clone()).sum().backward()
# Test preserving random state
m6 = MyModel(False, False)
m7, m8 = (deepcopy(m6) for _ in range(2))
checkpoint(m7, preserve_rng_state=False)
checkpoint(m8, preserve_rng_state=True)
for mi in (m6, m7, m8):
torch.manual_seed(42)
loss = mi(x.clone()).sum()
torch.manual_seed(41)
loss.backward()
# check that m6 and m7 have at least one different grad
        self.assertNotEqual(
            [p1.grad for p1 in m6.parameters()], [p2.grad for p2 in m7.parameters()]
        )
# check that m6 and m8 have identical grads
for p1, p2 in zip(m6.parameters(), m8.parameters()):
self.assertEqual(p1.grad, p2.grad)
|
import unittest
from collections import deque, OrderedDict
from contextlib import ContextDecorator, contextmanager, nullcontext
from copy import deepcopy
from functools import partial
from typing import Tuple
import torch
import torch.nn as nn
from torch.distributed._composable import checkpoint
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import run_tests, TestCase
from torch.utils.checkpoint import CheckpointError
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/distributed/_composable/test_composability/test_2d_composability.py
|
__init__
|
def __init__(self):
super().__init__()
self.net1 = nn.Linear(5, 8)
self.relu = nn.ReLU()
self.net2 = nn.Linear(8, 4)
self.net3 = nn.Linear(4, 12)
|
import copy
import functools
import io
from copy import deepcopy
from typing import List, Type
import torch
import torch.distributed as dist
import torch.distributed.checkpoint as dcp
import torch.nn as nn
import torch.nn.functional as F
from torch.distributed._composable import replicate
from torch.distributed._composable.fsdp import CPUOffloadPolicy, fully_shard
from torch.distributed._tensor import DTensor, init_device_mesh, Replicate, Shard
from torch.distributed.checkpoint.state_dict import (
get_model_state_dict,
get_optimizer_state_dict,
set_model_state_dict,
set_optimizer_state_dict,
StateDictOptions,
)
from torch.distributed.device_mesh import DeviceMesh
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp._common_utils import (
_get_module_fsdp_state,
clean_tensor_name,
)
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
from torch.distributed.tensor.debug import CommDebugMode
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.distributed.tensor.parallel.ddp import _pre_dp_module_transform
from torch.distributed.tensor.parallel.fsdp import DTensorExtensions
from torch.distributed.tensor.parallel.input_reshard import input_reshard
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import FSDPTest, MLP, MLPStack
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
skipIfRocm,
)
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
MLPModule,
ModelArgs,
Transformer,
with_comms,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
class SimpleModel(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_composable/test_composability/test_2d_composability.py
|
forward
|
def forward(self, x):
x = F.relu(self.net1(x))
x = F.relu(self.net2(x))
x = F.relu(self.net3(x))
return x
|
import copy
import functools
import io
from copy import deepcopy
from typing import List, Type
import torch
import torch.distributed as dist
import torch.distributed.checkpoint as dcp
import torch.nn as nn
import torch.nn.functional as F
from torch.distributed._composable import replicate
from torch.distributed._composable.fsdp import CPUOffloadPolicy, fully_shard
from torch.distributed._tensor import DTensor, init_device_mesh, Replicate, Shard
from torch.distributed.checkpoint.state_dict import (
get_model_state_dict,
get_optimizer_state_dict,
set_model_state_dict,
set_optimizer_state_dict,
StateDictOptions,
)
from torch.distributed.device_mesh import DeviceMesh
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp._common_utils import (
_get_module_fsdp_state,
clean_tensor_name,
)
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
from torch.distributed.tensor.debug import CommDebugMode
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.distributed.tensor.parallel.ddp import _pre_dp_module_transform
from torch.distributed.tensor.parallel.fsdp import DTensorExtensions
from torch.distributed.tensor.parallel.input_reshard import input_reshard
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import FSDPTest, MLP, MLPStack
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
skipIfRocm,
)
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
MLPModule,
ModelArgs,
Transformer,
with_comms,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
class SimpleModel(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_composable/test_composability/test_2d_composability.py
|
get_input
|
def get_input(self):
return torch.rand(4, 5, device="cuda")
|
import copy
import functools
import io
from copy import deepcopy
from typing import List, Type
import torch
import torch.distributed as dist
import torch.distributed.checkpoint as dcp
import torch.nn as nn
import torch.nn.functional as F
from torch.distributed._composable import replicate
from torch.distributed._composable.fsdp import CPUOffloadPolicy, fully_shard
from torch.distributed._tensor import DTensor, init_device_mesh, Replicate, Shard
from torch.distributed.checkpoint.state_dict import (
get_model_state_dict,
get_optimizer_state_dict,
set_model_state_dict,
set_optimizer_state_dict,
StateDictOptions,
)
from torch.distributed.device_mesh import DeviceMesh
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp._common_utils import (
_get_module_fsdp_state,
clean_tensor_name,
)
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
from torch.distributed.tensor.debug import CommDebugMode
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.distributed.tensor.parallel.ddp import _pre_dp_module_transform
from torch.distributed.tensor.parallel.fsdp import DTensorExtensions
from torch.distributed.tensor.parallel.input_reshard import input_reshard
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import FSDPTest, MLP, MLPStack
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
skipIfRocm,
)
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
MLPModule,
ModelArgs,
Transformer,
with_comms,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
class SimpleModel(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_composable/test_composability/test_2d_composability.py
|
__init__
|
def __init__(self):
super().__init__()
self.net1 = nn.Linear(5, 8)
self.relu = nn.ReLU()
self.net2 = nn.Linear(8, 4)
self.net3 = nn.Linear(4, 12)
|
import copy
import functools
import io
from copy import deepcopy
from typing import List, Type
import torch
import torch.distributed as dist
import torch.distributed.checkpoint as dcp
import torch.nn as nn
import torch.nn.functional as F
from torch.distributed._composable import replicate
from torch.distributed._composable.fsdp import CPUOffloadPolicy, fully_shard
from torch.distributed._tensor import DTensor, init_device_mesh, Replicate, Shard
from torch.distributed.checkpoint.state_dict import (
get_model_state_dict,
get_optimizer_state_dict,
set_model_state_dict,
set_optimizer_state_dict,
StateDictOptions,
)
from torch.distributed.device_mesh import DeviceMesh
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp._common_utils import (
_get_module_fsdp_state,
clean_tensor_name,
)
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
from torch.distributed.tensor.debug import CommDebugMode
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.distributed.tensor.parallel.ddp import _pre_dp_module_transform
from torch.distributed.tensor.parallel.fsdp import DTensorExtensions
from torch.distributed.tensor.parallel.input_reshard import input_reshard
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import FSDPTest, MLP, MLPStack
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
skipIfRocm,
)
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
MLPModule,
ModelArgs,
Transformer,
with_comms,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
class SimpleModel(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_composable/test_composability/test_2d_composability.py
|
forward
|
def forward(self, x):
x = F.relu(self.net1(x))
x = F.relu(self.net2(x))
x = F.relu(self.net3(x))
return x
|
import copy
import functools
import io
from copy import deepcopy
from typing import List, Type
import torch
import torch.distributed as dist
import torch.distributed.checkpoint as dcp
import torch.nn as nn
import torch.nn.functional as F
from torch.distributed._composable import replicate
from torch.distributed._composable.fsdp import CPUOffloadPolicy, fully_shard
from torch.distributed._tensor import DTensor, init_device_mesh, Replicate, Shard
from torch.distributed.checkpoint.state_dict import (
get_model_state_dict,
get_optimizer_state_dict,
set_model_state_dict,
set_optimizer_state_dict,
StateDictOptions,
)
from torch.distributed.device_mesh import DeviceMesh
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp._common_utils import (
_get_module_fsdp_state,
clean_tensor_name,
)
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
from torch.distributed.tensor.debug import CommDebugMode
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.distributed.tensor.parallel.ddp import _pre_dp_module_transform
from torch.distributed.tensor.parallel.fsdp import DTensorExtensions
from torch.distributed.tensor.parallel.input_reshard import input_reshard
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import FSDPTest, MLP, MLPStack
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
skipIfRocm,
)
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
MLPModule,
ModelArgs,
Transformer,
with_comms,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
class SimpleModel(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_composable/test_composability/test_2d_composability.py
|
get_input
|
def get_input(self):
return torch.rand(4, 5, device="cuda")
|
import copy
import functools
import io
from copy import deepcopy
from typing import List, Type
import torch
import torch.distributed as dist
import torch.distributed.checkpoint as dcp
import torch.nn as nn
import torch.nn.functional as F
from torch.distributed._composable import replicate
from torch.distributed._composable.fsdp import CPUOffloadPolicy, fully_shard
from torch.distributed._tensor import DTensor, init_device_mesh, Replicate, Shard
from torch.distributed.checkpoint.state_dict import (
get_model_state_dict,
get_optimizer_state_dict,
set_model_state_dict,
set_optimizer_state_dict,
StateDictOptions,
)
from torch.distributed.device_mesh import DeviceMesh
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp._common_utils import (
_get_module_fsdp_state,
clean_tensor_name,
)
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
from torch.distributed.tensor.debug import CommDebugMode
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.distributed.tensor.parallel.ddp import _pre_dp_module_transform
from torch.distributed.tensor.parallel.fsdp import DTensorExtensions
from torch.distributed.tensor.parallel.input_reshard import input_reshard
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import FSDPTest, MLP, MLPStack
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
skipIfRocm,
)
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
MLPModule,
ModelArgs,
Transformer,
with_comms,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
class SimpleModel(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_composable/test_composability/test_2d_composability.py
|
test_train_parity_2d_mlp
|
def test_train_parity_2d_mlp(self):
global_mesh = self.init_global_mesh()
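        # Sweep resharding, activation checkpointing, and MLP width for the 2D (FSDP + TP) parity test.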
self.run_subtests(
{
"reshard_after_forward": [False, True],
"use_activation_checkpointing": [False, True],
# TODO: change "mlp_dim" back to [3, 16, 17] when uneven sharding
# is supported for FSDP+TP
"mlp_dim": [4, 16, 20],
},
functools.partial(self._test_train_parity_2d_mlp, global_mesh),
)
|
import copy
import functools
import io
from copy import deepcopy
from typing import List, Type
import torch
import torch.distributed as dist
import torch.distributed.checkpoint as dcp
import torch.nn as nn
import torch.nn.functional as F
from torch.distributed._composable import replicate
from torch.distributed._composable.fsdp import CPUOffloadPolicy, fully_shard
from torch.distributed._tensor import DTensor, init_device_mesh, Replicate, Shard
from torch.distributed.checkpoint.state_dict import (
get_model_state_dict,
get_optimizer_state_dict,
set_model_state_dict,
set_optimizer_state_dict,
StateDictOptions,
)
from torch.distributed.device_mesh import DeviceMesh
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp._common_utils import (
_get_module_fsdp_state,
clean_tensor_name,
)
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
from torch.distributed.tensor.debug import CommDebugMode
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.distributed.tensor.parallel.ddp import _pre_dp_module_transform
from torch.distributed.tensor.parallel.fsdp import DTensorExtensions
from torch.distributed.tensor.parallel.input_reshard import input_reshard
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import FSDPTest, MLP, MLPStack
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
skipIfRocm,
)
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
MLPModule,
ModelArgs,
Transformer,
with_comms,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
class TestFullyShard2DTraining(FSDPTest):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_composable/test_composability/test_2d_composability.py
|
test_train_parity_2d_transformer_checkpoint_resume
|
def test_train_parity_2d_transformer_checkpoint_resume(self):
"""
Tests train parity of a 2D transformer without checkpointing against a
2D transformer with a checkpoint save/load.
"""
self.run_subtests(
{
"use_seq_parallel": [False, True],
# If reusing, then load into the same model/optimizer instance
# else construct new ones (requiring eager optim state init)
"reuse_model_optim": [False, True],
"optimizer_class": [torch.optim.Adam, torch.optim.AdamW],
# TODO: need to update `parallelize` before including foreach=True for testing
"foreach": [False],
},
self._test_train_parity_2d_transformer_checkpoint_resume,
)
|
import copy
import functools
import io
from copy import deepcopy
from typing import List, Type
import torch
import torch.distributed as dist
import torch.distributed.checkpoint as dcp
import torch.nn as nn
import torch.nn.functional as F
from torch.distributed._composable import replicate
from torch.distributed._composable.fsdp import CPUOffloadPolicy, fully_shard
from torch.distributed._tensor import DTensor, init_device_mesh, Replicate, Shard
from torch.distributed.checkpoint.state_dict import (
get_model_state_dict,
get_optimizer_state_dict,
set_model_state_dict,
set_optimizer_state_dict,
StateDictOptions,
)
from torch.distributed.device_mesh import DeviceMesh
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp._common_utils import (
_get_module_fsdp_state,
clean_tensor_name,
)
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
from torch.distributed.tensor.debug import CommDebugMode
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.distributed.tensor.parallel.ddp import _pre_dp_module_transform
from torch.distributed.tensor.parallel.fsdp import DTensorExtensions
from torch.distributed.tensor.parallel.input_reshard import input_reshard
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import FSDPTest, MLP, MLPStack
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
skipIfRocm,
)
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
MLPModule,
ModelArgs,
Transformer,
with_comms,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
class TestFullyShard2DTraining(FSDPTest):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_composable/test_composability/test_2d_composability.py
|
parallelize
|
def parallelize(_model: Transformer, mesh: DeviceMesh, use_seq_parallel: bool):
_model = Transformer.parallelize(_model, mesh["tp"], use_seq_parallel)
for layer in _model.layers:
fully_shard(layer, mesh=mesh["dp"])
fully_shard(_model, mesh=mesh["dp"])
return _model
global_mesh = self.init_global_mesh()
# Baseline: run two iterations without checkpointing
seed = 42
torch.manual_seed(seed)
model_args = ModelArgs(dropout_p=0.0)
model_no_cp = parallelize(
Transformer(model_args), global_mesh, use_seq_parallel
)
optim_no_cp = optimizer_class(
model_no_cp.parameters(), lr=1e-2, foreach=foreach
)
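        # Re-seed per data-parallel rank so each rank draws different input tokens.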
torch.manual_seed(42 + global_mesh["dp"].get_local_rank() + 1)
inp = torch.randint(0, model_args.vocab_size, (3, 16), device="cuda")
loss_no_cp1 = train_step(model_no_cp, optim_no_cp, inp)
loss_no_cp2 = train_step(model_no_cp, optim_no_cp, inp)
# Test: run one iteration, save checkpoint, zero states or init new
# model/optimizer, load checkpoint, and run another iteration
torch.manual_seed(seed)
model_cp = parallelize(Transformer(model_args), global_mesh, use_seq_parallel)
optim_cp = optimizer_class(model_cp.parameters(), lr=1e-2, foreach=foreach)
loss_cp1 = train_step(model_cp, optim_cp, inp)
self.assertEqual(loss_no_cp1, loss_cp1)
sharded_sd = {
"model": get_model_state_dict(model_cp),
# Use `get_optimizer_state_dict` to handle eager optim state init
# when constructing a new optimizer instance
"optim": get_optimizer_state_dict(model_cp, optim_cp),
}
dcp.save(
state_dict=sharded_sd,
storage_writer=dcp.FileSystemWriter(self.temp_dir),
)
if reuse_model_optim:
with torch.no_grad():
for param in model_cp.parameters():
param.zero_()
optim_sd = optim_cp.state_dict()
for param_states in optim_sd["state"].values():
for state_value in param_states.values():
if torch.is_tensor(state_value):
state_value.zero_()
else:
torch.manual_seed(seed + 1) # different seed
model_cp = parallelize(
Transformer(model_args), global_mesh, use_seq_parallel
)
optim_cp = optimizer_class(model_cp.parameters(), lr=1e-2, foreach=foreach)
self.assertNotEqual(loss_no_cp2, train_step(model_cp, optim_cp, inp))
sharded_sd = {
"model": get_model_state_dict(model_cp),
"optim": get_optimizer_state_dict(model_cp, optim_cp),
}
dcp.load(
state_dict=sharded_sd,
storage_reader=dcp.FileSystemReader(self.temp_dir),
)
self.assertGreater(len(optim_cp.state_dict()["state"]), 0)
loss_cp2 = train_step(model_cp, optim_cp, inp)
self.assertEqual(loss_no_cp2, loss_cp2)
|
import copy
import functools
import io
from copy import deepcopy
from typing import List, Type
import torch
import torch.distributed as dist
import torch.distributed.checkpoint as dcp
import torch.nn as nn
import torch.nn.functional as F
from torch.distributed._composable import replicate
from torch.distributed._composable.fsdp import CPUOffloadPolicy, fully_shard
from torch.distributed._tensor import DTensor, init_device_mesh, Replicate, Shard
from torch.distributed.checkpoint.state_dict import (
get_model_state_dict,
get_optimizer_state_dict,
set_model_state_dict,
set_optimizer_state_dict,
StateDictOptions,
)
from torch.distributed.device_mesh import DeviceMesh
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp._common_utils import (
_get_module_fsdp_state,
clean_tensor_name,
)
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
from torch.distributed.tensor.debug import CommDebugMode
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.distributed.tensor.parallel.ddp import _pre_dp_module_transform
from torch.distributed.tensor.parallel.fsdp import DTensorExtensions
from torch.distributed.tensor.parallel.input_reshard import input_reshard
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import FSDPTest, MLP, MLPStack
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
skipIfRocm,
)
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
MLPModule,
ModelArgs,
Transformer,
with_comms,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_composable/fully_shard/test_fully_shard_runtime.py
|
_test_unshard_reshard_order
|
def _test_unshard_reshard_order(self, fsdp_wrap_mode: FSDPWrapMode):
device = torch.device("cuda")
(
composable_module,
composable_optim,
fsdp_wrapped_model,
fsdp_wrapped_optim,
) = self._init_models_and_optims(device, fsdp_wrap_mode)
# Before checking the unshard/reshard order, sanity check that the
# assumption about wrapper FQN being a suffix of composable FQN holds
all_composable_handles = traversal_utils._get_fsdp_handles(composable_module)
all_wrapped_handles = traversal_utils._get_fsdp_handles(fsdp_wrapped_model)
self._check_same_param_handles(all_composable_handles, all_wrapped_handles)
num_handles = len(all_composable_handles)
orig_unshard = torch.distributed.fsdp._runtime_utils._unshard
orig_reshard = torch.distributed.fsdp._runtime_utils._reshard
UnshardReshardEvent = Tuple[str, _HandlesKey]
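        # Patch _unshard/_reshard to record ("unshard"/"reshard", handles_key) events so the composable and wrapper orders can be compared.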
def patched_unshard(
unshard_reshard_order: List[UnshardReshardEvent],
state: _FSDPState,
handles: List[FlatParamHandle],
*args,
**kwargs,
):
handles_key = tuple(handles)
unshard_reshard_order.append(("unshard", handles_key))
return orig_unshard(state, handles, *args, **kwargs)
def patched_reshard(
unshard_reshard_order: List[UnshardReshardEvent],
state: _FSDPState,
handles: List[FlatParamHandle],
*args,
**kwargs,
):
handles_key = tuple(handles)
unshard_reshard_order.append(("reshard", handles_key))
return orig_reshard(state, handles, *args, **kwargs)
@contextlib.contextmanager
def patch_unshard(_patched_unshard: Callable):
_orig_unshard = torch.distributed.fsdp._runtime_utils._unshard
torch.distributed.fsdp._runtime_utils._unshard = _patched_unshard
try:
yield
finally:
torch.distributed.fsdp._runtime_utils._unshard = _orig_unshard
@contextlib.contextmanager
def patch_reshard(_patched_reshard: Callable):
_orig_reshard = torch.distributed.fsdp._runtime_utils._reshard
torch.distributed.fsdp._runtime_utils._reshard = _patched_reshard
try:
yield
finally:
                torch.distributed.fsdp._runtime_utils._reshard = _orig_reshard
composable_order: List[UnshardReshardEvent] = []
wrapped_order: List[UnshardReshardEvent] = []
inp = torch.randn(2, 100, device="cuda")
losses: List[torch.Tensor] = []
for order, model, optim in (
(composable_order, composable_module, composable_optim),
(wrapped_order, fsdp_wrapped_model, fsdp_wrapped_optim),
):
with patch_unshard(
functools.partial(patched_unshard, order)
), patch_reshard(functools.partial(patched_reshard, order)):
optim.zero_grad(set_to_none=True)
out = model(inp)
loss = out.sum()
losses.append(loss)
loss.backward()
optim.step()
self.assertEqual(losses[0], losses[1])
# Sanity check that the unshard/reshard events were recorded, where we
# expect one unshard/reshard pair for forward, one pair for backward,
# and possibly some extra unshards from backward prefetching (in this
# case, we expect exactly 2 extra since there are 3 handles)
self.assertGreaterEqual(len(composable_order), 2 * 2 * num_handles)
self.assertGreaterEqual(len(wrapped_order), 2 * 2 * num_handles)
self.assertGreaterEqual(
len([e for e in composable_order if e[0] == "unshard"]), 2 * num_handles
)
self.assertGreaterEqual(
len([e for e in wrapped_order if e[0] == "unshard"]), 2 * num_handles
)
self.assertGreaterEqual(
len([e for e in composable_order if e[0] == "reshard"]), 2 * num_handles
)
self.assertGreaterEqual(
len([e for e in wrapped_order if e[0] == "reshard"]), 2 * num_handles
)
# Check that the unshard/reshard order matches
self.assertEqual(len(composable_order), len(wrapped_order))
for (
(composable_event, composable_handles_key),
(wrapped_event, wrapped_handles_key),
) in zip(composable_order, wrapped_order):
self.assertEqual(composable_event, wrapped_event)
self._check_same_param_handles(composable_handles_key, wrapped_handles_key)
|
def _test_unshard_reshard_order(self, fsdp_wrap_mode: FSDPWrapMode):
device = torch.device("cuda")
(
composable_module,
composable_optim,
fsdp_wrapped_model,
fsdp_wrapped_optim,
) = self._init_models_and_optims(
device, fsdp_wrap_mode, ShardingStrategy.FULL_SHARD
)
# Before checking the unshard/reshard order, sanity check that the
# assumption about wrapper FQN being a suffix of composable FQN holds
all_composable_handles = traversal_utils._get_fsdp_handles(composable_module)
all_wrapped_handles = traversal_utils._get_fsdp_handles(fsdp_wrapped_model)
for c_handle, w_handle in zip(all_composable_handles, all_wrapped_handles):
self._check_same_param_handles(c_handle, w_handle)
num_handles = len(all_composable_handles)
orig_unshard = torch.distributed.fsdp._runtime_utils._unshard
orig_reshard = torch.distributed.fsdp._runtime_utils._reshard
UnshardReshardEvent = Tuple[str, FlatParamHandle]
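        # Patch _unshard/_reshard to record ("unshard"/"reshard", handle) events so the composable and wrapper orders can be compared.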
def patched_unshard(
unshard_reshard_order: List[UnshardReshardEvent],
state: _FSDPState,
handle: FlatParamHandle,
*args,
**kwargs,
):
unshard_reshard_order.append(("unshard", handle))
return orig_unshard(state, handle, *args, **kwargs)
def patched_reshard(
unshard_reshard_order: List[UnshardReshardEvent],
state: _FSDPState,
handle: FlatParamHandle,
*args,
**kwargs,
):
unshard_reshard_order.append(("reshard", handle))
return orig_reshard(state, handle, *args, **kwargs)
@contextlib.contextmanager
def patch_unshard(_patched_unshard: Callable):
_orig_unshard = torch.distributed.fsdp._runtime_utils._unshard
torch.distributed.fsdp._runtime_utils._unshard = _patched_unshard
try:
yield
finally:
torch.distributed.fsdp._runtime_utils._unshard = _orig_unshard
@contextlib.contextmanager
def patch_reshard(_patched_reshard: Callable):
_orig_reshard = torch.distributed.fsdp._runtime_utils._reshard
torch.distributed.fsdp._runtime_utils._reshard = _patched_reshard
try:
yield
finally:
                torch.distributed.fsdp._runtime_utils._reshard = _orig_reshard
composable_order: List[UnshardReshardEvent] = []
wrapped_order: List[UnshardReshardEvent] = []
inp = torch.randn(2, 100, device="cuda")
losses: List[torch.Tensor] = []
for order, model, optim in (
(composable_order, composable_module, composable_optim),
(wrapped_order, fsdp_wrapped_model, fsdp_wrapped_optim),
):
with patch_unshard(
functools.partial(patched_unshard, order)
), patch_reshard(functools.partial(patched_reshard, order)):
optim.zero_grad(set_to_none=True)
out = model(inp)
loss = out.sum()
losses.append(loss)
loss.backward()
optim.step()
self.assertEqual(losses[0], losses[1])
# Sanity check that the unshard/reshard events were recorded, where we
# expect one unshard/reshard pair for forward, one pair for backward,
# and possibly some extra unshards from backward prefetching (in this
# case, we expect exactly 2 extra since there are 3 handles)
self.assertGreaterEqual(len(composable_order), 2 * 2 * num_handles)
self.assertGreaterEqual(len(wrapped_order), 2 * 2 * num_handles)
self.assertGreaterEqual(
len([e for e in composable_order if e[0] == "unshard"]), 2 * num_handles
)
self.assertGreaterEqual(
len([e for e in wrapped_order if e[0] == "unshard"]), 2 * num_handles
)
self.assertGreaterEqual(
len([e for e in composable_order if e[0] == "reshard"]), 2 * num_handles
)
self.assertGreaterEqual(
len([e for e in wrapped_order if e[0] == "reshard"]), 2 * num_handles
)
# Check that the unshard/reshard order matches
self.assertEqual(len(composable_order), len(wrapped_order))
for (
(composable_event, composable_handles_key),
(wrapped_event, wrapped_handles_key),
) in zip(composable_order, wrapped_order):
self.assertEqual(composable_event, wrapped_event)
self._check_same_param_handles(composable_handles_key, wrapped_handles_key)
|
import contextlib
import copy
import functools
import sys
from enum import auto, Enum
from typing import Callable, Iterable, List, Tuple
import torch
import torch.distributed as dist
import torch.distributed.fsdp._traversal_utils as traversal_utils
import torch.nn as nn
from torch.distributed._composable import fully_shard
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp._common_utils import _FSDPState
from torch.distributed.fsdp.flat_param import _HandlesKey, FlatParamHandle
from torch.distributed.fsdp.wrap import ModuleWrapPolicy
from torch.testing._internal.common_dist_composable import (
CompositeParamModel,
UnitModule,
)
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import FSDPTest
from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
class TestRuntime(FSDPTest):
|
import contextlib
import copy
import functools
import sys
from enum import auto, Enum
from typing import Callable, List, Tuple
import torch
import torch.distributed as dist
import torch.distributed.fsdp._traversal_utils as traversal_utils
import torch.nn as nn
from torch.distributed._composable import fully_shard
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, ShardingStrategy
from torch.distributed.fsdp._common_utils import _FSDPState
from torch.distributed.fsdp._flat_param import FlatParamHandle
from torch.distributed.fsdp.wrap import ModuleWrapPolicy
from torch.testing._internal.common_dist_composable import (
CompositeParamModel,
UnitModule,
)
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import FSDPTest
from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
class TestRuntime(FSDPTest):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/_composable/fully_shard/test_fully_shard_util.py
|
test_get_sharded_module_tree_with_module_name_to_fqns
|
def test_get_sharded_module_tree_with_module_name_to_fqns(self):
model = CompositeModel(torch.device("cuda"))
fully_shard(
model,
policy=ModuleWrapPolicy({UnitModule}),
)
(
sharded_tree_info,
sharded_module_name_to_fqns,
) = _get_sharded_module_tree_with_module_name_to_fqns(model)
self.assertEqual(
list(sharded_module_name_to_fqns.keys()),
["[CompositeModel]", "u1[UnitModule]", "u2[UnitModule]"],
)
self.assertEqual(
list(sharded_module_name_to_fqns.values()),
[
["l1.weight", "l1.bias", "l2.weight", "l2.bias"],
[
"u1.l1.weight",
"u1.l1.bias",
"u1.seq.1.weight",
"u1.seq.1.bias",
"u1.l2.weight",
"u1.l2.bias",
],
[
"u2.l1.weight",
"u2.l1.bias",
"u2.seq.1.weight",
"u2.seq.1.bias",
"u2.l2.weight",
"u2.l2.bias",
],
],
)
# Test nested fully_shard
new_model = CompositeModel(torch.device("cuda"))
fully_shard(new_model.u1)
fully_shard(new_model)
(
sharded_tree_info,
sharded_module_name_to_fqns,
) = _get_sharded_module_tree_with_module_name_to_fqns(new_model)
self.assertEqual(
list(sharded_module_name_to_fqns.keys()),
["[CompositeModel]", "u1[UnitModule]"],
)
self.assertEqual(
list(sharded_module_name_to_fqns.values()),
[
[
"l1.weight",
"l1.bias",
"u2.l1.weight",
"u2.l1.bias",
"u2.seq.1.weight",
"u2.seq.1.bias",
"u2.l2.weight",
"u2.l2.bias",
"l2.weight",
"l2.bias",
],
[
"u1.l1.weight",
"u1.l1.bias",
"u1.seq.1.weight",
"u1.seq.1.bias",
"u1.l2.weight",
"u1.l2.bias",
],
],
)
|
import sys
import torch
import torch.distributed as dist
from torch.distributed._composable import fully_shard
from torch.distributed.fsdp._debug_utils import (
_get_sharded_module_tree_with_module_name_to_fqns,
)
from torch.distributed.fsdp.wrap import ModuleWrapPolicy
from torch.testing._internal.common_dist_composable import CompositeModel, UnitModule
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import FSDPTest
from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
class TestUtils(FSDPTest):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_composable/test_replicate_with_compiler.py
|
test_compile_backward_only
|
def test_compile_backward_only(self):
self._test_compile(use_gpu=True, no_sync=False, no_compile_forward=True)
|
import contextlib
import functools
import os
import unittest
from copy import deepcopy
from typing import Callable, Optional
import torch
import torch.distributed as dist
from torch import _inductor as inductor, nn
from torch._C import FileCheck
from torch._dynamo import compiled_autograd
from torch._dynamo.utils import counters
from torch._inductor.test_case import TestCase as InductorTestCase
from torch._inductor.utils import run_and_get_triton_code
from torch.distributed._composable.replicate import replicate
from torch.distributed.algorithms.ddp_comm_hooks import (
default_hooks as ddp_default_hooks,
)
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.nn.parallel.distributed import DistributedDataParallel as DDP
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
)
from torch.testing._internal.common_utils import run_tests, skipIfRocm
from torch.testing._internal.distributed.fake_pg import FakeStore
from torch.utils._triton import has_triton
from torch.utils.checkpoint import checkpoint
DIM = 2000
class ReplicateTest(MultiProcessInductorTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_composable/test_replicate_with_compiler.py
|
bwd
|
def bwd(loss):
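            # Run the backward under compiled_autograd so replicate's gradient all-reduces can be bucketed by inductor.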
with compiled_autograd.enable(compiler_fn()):
loss.backward()
for i in range(loop):
loss = compiled_replicate_model(input).sum()
if i != loop - 1:
# Leave the last bwd for the run_and_get_triton_code.
bwd(loss)
code = run_and_get_triton_code(functools.partial(bwd, loss=loss))
self.assertEqual(counters["inductor"]["ddp_buckets"], 3)
return code
|
import contextlib
import functools
import os
import unittest
from copy import deepcopy
from typing import Callable, Optional
import torch
import torch.distributed as dist
from torch import _inductor as inductor, nn
from torch._C import FileCheck
from torch._dynamo import compiled_autograd
from torch._dynamo.utils import counters
from torch._inductor.test_case import TestCase as InductorTestCase
from torch._inductor.utils import run_and_get_triton_code
from torch.distributed._composable.replicate import replicate
from torch.distributed.algorithms.ddp_comm_hooks import (
default_hooks as ddp_default_hooks,
)
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.nn.parallel.distributed import DistributedDataParallel as DDP
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
)
from torch.testing._internal.common_utils import run_tests, skipIfRocm
from torch.testing._internal.distributed.fake_pg import FakeStore
from torch.utils._triton import has_triton
from torch.utils.checkpoint import checkpoint
DIM = 2000
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_composable/test_replicate_with_compiler.py
|
test_bucketing_concat_op
|
def test_bucketing_concat_op(self):
# Gradient is None
code = self._test_bucketing()
self.assertEqual(counters["inductor"]["ddp_buckets"], 3)
fc = FileCheck()
for i in range(3):
fc.check("aten.flatten.using_ints(").check("cpp_fused_").check(
"torch.ops._c10d_functional.all_reduce_.default("
)
for i in range(3):
fc.check("torch.ops._c10d_functional.wait_tensor.default")
fc.run(code)
# Gradient is not None
code = self._test_bucketing(init_process_group=False, loop=2)
self.assertEqual(counters["inductor"]["ddp_buckets"], 3)
fc = FileCheck()
for i in range(3):
fc.check("aten.flatten.using_ints(").check("cpp_fused_").check(
"torch.ops._c10d_functional.all_reduce_.default("
)
for i in range(3):
fc.check("torch.ops._c10d_functional.wait_tensor.default")
fc.run(code)
|
import contextlib
import functools
import os
import unittest
from copy import deepcopy
from typing import Callable, Optional
import torch
import torch.distributed as dist
from torch import _inductor as inductor, nn
from torch._C import FileCheck
from torch._dynamo import compiled_autograd
from torch._dynamo.utils import counters
from torch._inductor.test_case import TestCase as InductorTestCase
from torch._inductor.utils import run_and_get_triton_code
from torch.distributed._composable.replicate import replicate
from torch.distributed.algorithms.ddp_comm_hooks import (
default_hooks as ddp_default_hooks,
)
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.nn.parallel.distributed import DistributedDataParallel as DDP
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
)
from torch.testing._internal.common_utils import run_tests, skipIfRocm
from torch.testing._internal.distributed.fake_pg import FakeStore
from torch.utils._triton import has_triton
from torch.utils.checkpoint import checkpoint
DIM = 2000
class ReplicateTest(MultiProcessInductorTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_composable/test_replicate_with_compiler.py
|
setUp
|
def setUp(self) -> None:
super().setUp()
self._spawn_processes()
|
import contextlib
import functools
import os
import unittest
from copy import deepcopy
from typing import Callable, Optional
import torch
import torch.distributed as dist
from torch import _inductor as inductor, nn
from torch._C import FileCheck
from torch._dynamo import compiled_autograd
from torch._dynamo.utils import counters
from torch._inductor.test_case import TestCase as InductorTestCase
from torch._inductor.utils import run_and_get_triton_code
from torch.distributed._composable.replicate import replicate
from torch.distributed.algorithms.ddp_comm_hooks import (
default_hooks as ddp_default_hooks,
)
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.nn.parallel.distributed import DistributedDataParallel as DDP
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
)
from torch.testing._internal.common_utils import run_tests, skipIfRocm
from torch.testing._internal.distributed.fake_pg import FakeStore
from torch.utils._triton import has_triton
from torch.utils.checkpoint import checkpoint
DIM = 2000
class ReplicateTest(MultiProcessInductorTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_composable/test_replicate_with_compiler.py
|
tearDown
|
def tearDown(self):
super().tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
|
import contextlib
import functools
import os
import unittest
from copy import deepcopy
from typing import Callable, Optional
import torch
import torch.distributed as dist
from torch import _inductor as inductor, nn
from torch._C import FileCheck
from torch._dynamo import compiled_autograd
from torch._dynamo.utils import counters
from torch._inductor.test_case import TestCase as InductorTestCase
from torch._inductor.utils import run_and_get_triton_code
from torch.distributed._composable.replicate import replicate
from torch.distributed.algorithms.ddp_comm_hooks import (
default_hooks as ddp_default_hooks,
)
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.nn.parallel.distributed import DistributedDataParallel as DDP
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
)
from torch.testing._internal.common_utils import run_tests, skipIfRocm
from torch.testing._internal.distributed.fake_pg import FakeStore
from torch.utils._triton import has_triton
from torch.utils.checkpoint import checkpoint
DIM = 2000
class ReplicateTest(MultiProcessInductorTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_composable/test_replicate_with_compiler.py
|
test_ddp_tp
|
def test_ddp_tp(self):
ref_model = Net()
compiled_replicate_model = deepcopy(ref_model)
mesh_2d = init_device_mesh(
"cuda", (2, self.world_size // 2), mesh_dim_names=("dp", "tp")
)
tp_mesh = mesh_2d["tp"]
dp_mesh = mesh_2d["dp"]
parallelize_plan = {
"fc1": ColwiseParallel(),
"fc2": RowwiseParallel(),
"fc3": ColwiseParallel(),
"fc4": RowwiseParallel(),
}
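        # Apply TP over the tp mesh first, then replicate (DDP) over the dp mesh for both the eager reference and the compiled model.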
ref_model = parallelize_module(ref_model, tp_mesh, parallelize_plan)
ref_model = replicate(ref_model, device_mesh=dp_mesh)
compiled_replicate_model = parallelize_module(
compiled_replicate_model, tp_mesh, parallelize_plan
)
compiled_replicate_model = replicate(
compiled_replicate_model, device_mesh=dp_mesh
)
compiled_replicate_model = torch.compile(compiled_replicate_model)
data = torch.randn([1, DIM])
with compiled_autograd.enable(compiler_fn()):
loss = compiled_replicate_model(data).sum()
# TODO: We need "pre-dispatch tracing of backward graph" to make this work:
# https://github.com/pytorch/pytorch/issues/127797#issuecomment-2291695474
with self.assertRaisesRegex(
AssertionError,
"Expected ProxyTensor, got <class 'torch.distributed._tensor.api.DTensor'>",
):
loss.backward()
# ref_loss = ref_model(data).sum()
# ref_loss.backward()
# for p1, p2 in zip(
# ref_model.parameters(), compiled_replicate_model.parameters()
# ):
# self.assertEqual(p1.grad, p2.grad)
|
import contextlib
import functools
import os
import unittest
from copy import deepcopy
from typing import Callable, Optional
import torch
import torch.distributed as dist
from torch import _inductor as inductor, nn
from torch._C import FileCheck
from torch._dynamo import compiled_autograd
from torch._dynamo.utils import counters
from torch._inductor.test_case import TestCase as InductorTestCase
from torch._inductor.utils import run_and_get_triton_code
from torch.distributed._composable.replicate import replicate
from torch.distributed.algorithms.ddp_comm_hooks import (
default_hooks as ddp_default_hooks,
)
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.nn.parallel.distributed import DistributedDataParallel as DDP
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
)
from torch.testing._internal.common_utils import run_tests, skipIfRocm
from torch.testing._internal.distributed.fake_pg import FakeStore
from torch.utils._triton import has_triton
from torch.utils.checkpoint import checkpoint
DIM = 2000
class DDP_TP_Test(InductorTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_shard/sharded_tensor/ops/test_math_ops.py
|
test_sharded_bmm_errors
|
def test_sharded_bmm_errors(self):
specs = generate_chunk_sharding_specs_for_test(0)
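        # torch.bmm on ShardedTensors requires both operands to be 3D, sharded on dim 0, with matching placements; each case below violates one constraint.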
st_lhs = sharded_tensor.rand(specs[0], (15, 5, 6))
st_rhs = sharded_tensor.rand(specs[1], (15, 5, 6))
with self.assertRaisesRegex(
NotImplementedError,
"Both st and st2 need to have same placements for bmm",
):
torch.bmm(st_lhs, st_rhs)
for spec in specs:
st_lhs = sharded_tensor.rand(spec, (20, 3))
st_rhs = sharded_tensor.rand(spec, (20, 3))
with self.assertRaisesRegex(
TypeError,
"both st and st2 need to be a 3D ShardedTensor",
):
torch.bmm(st_lhs, st_rhs)
rhs = torch.rand(15, 5, 6).cuda(self.rank)
with self.assertRaisesRegex(
TypeError,
"st2 needs to be a ShardedTensor for torch.bmm",
):
torch.bmm(st_lhs, rhs)
spec.dim = 1
st_lhs = sharded_tensor.rand(spec, (15, 5, 6))
st_rhs = sharded_tensor.rand(spec, (15, 5, 6))
with self.assertRaisesRegex(
NotImplementedError,
"Only support performing bmm on tensors sharded on dim 0 now",
):
torch.bmm(st_lhs, st_rhs)
|
import torch
from torch.distributed._shard import _shard_tensor
import torch.distributed._shard.sharded_tensor as sharded_tensor
import torch.distributed as dist
from torch.distributed._shard.sharding_spec import (
ChunkShardingSpec,
EnumerableShardingSpec,
ShardMetadata,
)
from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.distributed._shard.sharded_tensor import (
TEST_GPU_NUM,
ShardedTensorTestBase,
with_comms,
)
from torch.testing._internal.distributed._shard.sharded_tensor._test_ops_common import (
gen_binary_op_func,
generate_chunk_sharding_specs_for_test,
)
class TestMathOps(ShardedTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/_shard/sharded_tensor/ops/test_matrix_ops.py
|
test_sharded_tensor_softmax
|
def test_sharded_tensor_softmax(self):
specs = _chunk_sharding_specs_list_for_test([0, 2], seed=17)
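        # Softmax over dim 1 (not a sharded dim for these specs) should match sharding the dense softmax result.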
for spec in specs:
tensor = torch.rand(15, 27, 16).cuda(self.rank)
tensor_n = torch.nn.functional.softmax(tensor, dim=1, dtype=torch.float32)
st_expected = _shard_tensor(tensor_n, spec)
self.assertTrue(
torch.allclose(
torch.nn.functional.softmax(
_shard_tensor(tensor, spec), dim=1, dtype=torch.float32
),
st_expected,
)
)
|
import copy
import itertools
import sys
import torch
from torch.distributed._shard import sharded_tensor, _shard_tensor
from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
run_tests,
)
from torch.testing._internal.distributed._shard.sharded_tensor import (
TEST_GPU_NUM,
ShardedTensorTestBase,
with_comms,
)
from torch.testing._internal.distributed._shard.sharded_tensor._test_ops_common import (
generate_enumerable_sharding_specs_for_test,
)
from torch.testing._internal.distributed._shard.sharded_tensor._test_st_common import (
_chunk_sharding_specs_list_for_test,
)
class TestShardedTensorMatrixOps(ShardedTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/_composable/test_replicate.py
|
forward
|
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
x = self.fc3(x)
return F.softmax(x, dim=1)
|
def forward(self, x):
return self.fc3(self.fc2(self.fc1(x)))
|
import os
from copy import deepcopy
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch import nn
from torch.distributed._composable.replicate import replicate
from torch.testing._internal.common_distributed import MultiProcessTestCase
from torch.testing._internal.common_utils import run_tests
class Net(nn.Module):
|
import os
from copy import deepcopy
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch import nn
from torch.distributed._composable.fsdp import fully_shard
from torch.distributed._composable.replicate import replicate
from torch.distributed._tensor import DTensor
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import run_tests
class Net(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/_composable/test_replicate.py
|
test_replicate_ignore_module
|
def test_replicate_ignore_module(self):
self._init_pg()
torch.cuda.set_device(self.rank)
# Seed ensures diff input and thus different local grads across ranks.
torch.manual_seed(self.rank)
torch.cuda.manual_seed(self.rank)
model = Net().cuda()
replicate(model, ignored_modules=[model.fc1])
        # Input is created on GPU and scaled by rank so local grads differ across ranks.
inp = torch.randn(5, 2, device="cuda") * (self.rank + 1)
out = model(inp) * 10
out.sum().backward()
# FC1 grads should not be synchronized, FC2 and 3 should be.
fc1_grad = model.fc1.weight.grad
tensor_list = [torch.zeros_like(fc1_grad) for _ in range(dist.get_world_size())]
dist.all_gather(tensor_list, fc1_grad)
grad, rest = tensor_list[0], tensor_list[1:]
for g in rest:
self.assertNotEqual(grad, g)
for dp_grad in [model.fc2.weight.grad, model.fc3.weight.grad]:
tensor_list = [
torch.zeros_like(dp_grad) for _ in range(dist.get_world_size())
]
dist.all_gather(tensor_list, dp_grad)
grad, rest = tensor_list[0], tensor_list[1:]
for g in rest:
self.assertEqual(grad, g)
|
import os
from copy import deepcopy
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch import nn
from torch.distributed._composable.fsdp import fully_shard
from torch.distributed._composable.replicate import replicate
from torch.distributed._tensor import DTensor
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import run_tests
class ReplicateTest(MultiProcessTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_composable/test_replicate.py
|
test_replicate_multi_module
|
def test_replicate_multi_module(self):
model = Net()
replicate_model = deepcopy(model)
replicate(replicate_model.fc1)
replicate(replicate_model.fc2)
replicate(replicate_model.fc3)
self._compare_module(model, replicate_model)
|
def test_replicate_multi_module(self):
self._init_pg()
model = Net()
replicate_model = deepcopy(model)
replicate(replicate_model.fc1)
replicate(replicate_model.fc2)
replicate(replicate_model.fc3)
self._compare_module(model, replicate_model)
|
import os
from copy import deepcopy
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch import nn
from torch.distributed._composable.replicate import replicate
from torch.testing._internal.common_distributed import MultiProcessTestCase
from torch.testing._internal.common_utils import run_tests
class ReplicateTest(MultiProcessTestCase):
|
import os
from copy import deepcopy
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch import nn
from torch.distributed._composable.fsdp import fully_shard
from torch.distributed._composable.replicate import replicate
from torch.distributed._tensor import DTensor
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import run_tests
class ReplicateTest(MultiProcessTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/_composable/test_replicate.py
|
test_replicate_device_id
|
def test_replicate_device_id(self):
self._init_pg()
model = Net()
model_cuda = deepcopy(model).cuda()
model_cuda2 = deepcopy(model_cuda)
replicate(model, device_id=torch.device("cpu"))
# DDP instance is attached in first pre forward
model(torch.randn(2, 2))
replicate_ddp_weakref = replicate.state(model)._ddp_weakref()
# Should be None for CPU training
self.assertEqual(None, replicate_ddp_weakref.device_ids)
replicate(model_cuda, device_id=torch.device(torch.cuda.current_device()))
# DDP instance is attached in first pre forward
model_cuda(torch.randn(2, 2))
replicate_ddp_weakref = replicate.state(model_cuda)._ddp_weakref()
self.assertEqual([0], replicate_ddp_weakref.device_ids)
# Pass in int as device_id
replicate(model_cuda2, device_id=int(torch.cuda.current_device()))
# DDP instance is attached in first pre forward
model_cuda2(torch.randn(2, 2))
replicate_ddp_weakref = replicate.state(model_cuda2)._ddp_weakref()
self.assertEqual([0], replicate_ddp_weakref.device_ids)
|
import os
from copy import deepcopy
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch import nn
from torch.distributed._composable.fsdp import fully_shard
from torch.distributed._composable.replicate import replicate
from torch.distributed._tensor import DTensor
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import run_tests
class ReplicateTest(MultiProcessTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_composable/test_replicate.py
|
test_replicate_wrong_device_id_type
|
def test_replicate_wrong_device_id_type(self):
self._init_pg()
model = Net()
with self.assertRaisesRegex(
RuntimeError, "Expected device_id to be int or torch.device"
):
replicate(model, device_id=[torch.device("cpu")])
|
import os
from copy import deepcopy
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch import nn
from torch.distributed._composable.fsdp import fully_shard
from torch.distributed._composable.replicate import replicate
from torch.distributed._tensor import DTensor
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import run_tests
class ReplicateTest(MultiProcessTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_composable/test_replicate.py
|
test_replicate_fully_shard_init
|
def test_replicate_fully_shard_init(self):
class ToyModel(nn.Module):
def __init__(self, dim: int):
super().__init__()
self.linears = nn.Sequential(
nn.Linear(dim, dim, bias=False),
nn.Linear(dim, dim, bias=False),
nn.Linear(dim, dim, bias=False),
)
self.proj = nn.Linear(dim, dim, bias=False)
def forward(self, x: torch.Tensor):
y = self.linears(x)
y = self.proj(y)
return y
self._init_pg()
torch.cuda.set_device(self.rank)
dim = 3
bz = 2
model = ToyModel(dim).cuda()
for linear in model.linears:
fully_shard(linear)
fully_shard(model.linears)
replicate(model, device_id=torch.cuda.current_device())
for linear in model.linears:
self.assertTrue(isinstance(linear.weight, DTensor))
inp = torch.rand(bz, dim)
# trigger lazy init
model(inp).sum()
for linear in model.linears:
self.assertTrue(isinstance(linear.weight, DTensor))
|
import os
from copy import deepcopy
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch import nn
from torch.distributed._composable.fsdp import fully_shard
from torch.distributed._composable.replicate import replicate
from torch.distributed._tensor import DTensor
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import run_tests
class ReplicateFullyShardInit(ReplicateTest):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
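A small sketch related to the fully_shard + replicate composition exercised above; sharded_param_names is an illustrative helper, and it only reports what the test itself asserts (that fully_shard-managed weights are DTensors).

from torch.distributed._tensor import DTensor

def sharded_param_names(model):
    # List which parameter names currently hold DTensors after composition;
    # the test above checks this for the fully_shard-wrapped linears both
    # before and after lazy init.
    return [name for name, p in model.named_parameters() if isinstance(p, DTensor)]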
torch
|
test/distributed/_composable/test_replicate.py
|
__init__
|
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 50, bias=False)
self.fc3 = nn.Linear(50, 4, bias=False)
self.relu = nn.ReLU()
|
def __init__(self) -> None:
super().__init__()
self.fc1 = nn.Linear(2, 2)
self.fc2 = nn.Linear(2, 2)
self.fc3 = nn.Linear(2, 2)
|
import os
from copy import deepcopy
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch import nn
from torch.distributed._composable.replicate import replicate
from torch.testing._internal.common_distributed import MultiProcessTestCase
from torch.testing._internal.common_utils import run_tests
class Net(nn.Module):
|
import os
from copy import deepcopy
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch import nn
from torch.distributed._composable.fsdp import fully_shard
from torch.distributed._composable.replicate import replicate
from torch.distributed._tensor import DTensor
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import run_tests
class Net(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/_composable/test_replicate.py
|
forward
|
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
x = self.fc3(x)
return F.softmax(x, dim=1)
|
def forward(self, x):
return self.fc3(self.fc2(self.fc1(x)))
|
import os
from copy import deepcopy
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch import nn
from torch.distributed._composable.replicate import replicate
from torch.testing._internal.common_distributed import MultiProcessTestCase
from torch.testing._internal.common_utils import run_tests
class Net(nn.Module):
|
import os
from copy import deepcopy
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch import nn
from torch.distributed._composable.fsdp import fully_shard
from torch.distributed._composable.replicate import replicate
from torch.distributed._tensor import DTensor
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import run_tests
class Net(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/_shard/sharded_tensor/ops/test_matrix_ops.py
|
test_sharded_tensor_layer_norm_error
|
def test_sharded_tensor_layer_norm_error(self):
specs = _chunk_sharding_specs_list_for_test([2], seed=10)
for spec in specs:
tensor = torch.rand(16, 35, 26).cuda(self.rank)
with self.assertRaisesRegex(
ValueError,
"normalized_shape dim must not be greater "
"than the dim of the sharded tensor.",
):
layer_norm = torch.nn.LayerNorm((14, 55, 35, 26)).cuda(self.rank)
layer_norm(_shard_tensor(tensor, spec))
with self.assertRaisesRegex(
ValueError,
r"Given normalized_shape=\[35\], expected input with shape "
r"\[\*, 35\], but got input of size \[16, 35, 26\].",
):
layer_norm = torch.nn.LayerNorm((35)).cuda(self.rank)
layer_norm(_shard_tensor(tensor, spec))
|
import copy
import itertools
import sys
import torch
from torch.distributed._shard import sharded_tensor, _shard_tensor
from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
run_tests,
)
from torch.testing._internal.distributed._shard.sharded_tensor import (
TEST_GPU_NUM,
ShardedTensorTestBase,
with_comms,
)
from torch.testing._internal.distributed._shard.sharded_tensor._test_ops_common import (
generate_enumerable_sharding_specs_for_test,
)
from torch.testing._internal.distributed._shard.sharded_tensor._test_st_common import (
_chunk_sharding_specs_list_for_test,
)
class TestShardedTensorMatrixOps(ShardedTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/_shard/sharded_tensor/ops/test_softmax.py
|
test_sharded_softmax_on_sharding_dim
|
def test_sharded_softmax_on_sharding_dim(self):
self._test_sharded_softmax(1, 1)
self._test_sharded_softmax(-1, 1)
|
import sys
import torch
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
run_tests,
)
from torch.testing._internal.distributed._shard.sharded_tensor import (
TEST_GPU_NUM,
ShardedTensorTestBase,
with_comms,
)
from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.distributed._shard.sharding_spec import ChunkShardingSpec
from torch.distributed._shard import _shard_tensor
class TestShardedSoftmax(ShardedTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/_composable/test_contract.py
|
__init__
|
def __init__(self):
super().__init__()
self.seq1 = nn.Sequential(*[nn.Linear(10, 10) for _ in range(2)])
self.seq2 = nn.Sequential(*[nn.Linear(10, 10) for _ in range(2)])
self.p = nn.Parameter(torch.randn(10, 10), requires_grad=True)
self.b = torch.zeros(1) # buffer
|
def __init__(self) -> None:
super().__init__()
self.seq1 = nn.Sequential(*[nn.Linear(10, 10) for _ in range(2)])
self.seq2 = nn.Sequential(*[nn.Linear(10, 10) for _ in range(2)])
self.p = nn.Parameter(torch.randn(10, 10), requires_grad=True)
self.b = torch.zeros(1) # buffer
|
from copy import deepcopy
from typing import Tuple
import torch
import torch.nn as nn
from torch.distributed._composable import _get_registry, contract
from torch.testing._internal.common_utils import run_tests, skipIfTorchDynamo, TestCase
class ToyModel(nn.Module):
|
from copy import deepcopy
from typing import List, Tuple
import torch
import torch.nn as nn
from torch.distributed._composable import _get_registry, contract
from torch.testing._internal.common_utils import run_tests, skipIfTorchDynamo, TestCase
class ToyModel(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/_composable/test_contract.py
|
test_multi_module_api
|
if __name__ == "__main__":
run_tests()
|
def test_multi_module_api(self):
@contract()
def multi_module_api(modules: List[nn.Module]) -> nn.Module:
return modules
model = nn.Sequential(*[nn.Linear(3, 3) for _ in range(5)])
multi_module_api([model[0], model[1]])
multi_module_api([model[2], model[3]])
multi_module_api([model[4]])
# Check that modules have the same state and registry iff they shared
# the same API call
states = [multi_module_api.state(module) for module in model]
self.assertEqual(states[0], states[1])
self.assertEqual(states[2], states[3])
self.assertNotEqual(states[0], states[2])
self.assertNotEqual(states[0], states[4])
self.assertNotEqual(states[2], states[4])
registries = [_get_registry(module) for module in model]
self.assertEqual(registries[0], registries[1])
self.assertEqual(registries[2], registries[3])
self.assertNotEqual(registries[0], registries[2])
self.assertNotEqual(registries[0], registries[4])
self.assertNotEqual(registries[2], registries[4])
# Check that applying an API to a module multiple times errors
model = nn.Sequential(*[nn.Linear(3, 3) for _ in range(5)])
multi_module_api([model[0], model[1]])
with self.assertRaisesRegex(
AssertionError,
"Each distinct composable distributed API can only be applied to "
r"a module once. multi_module_api has already been applied to the "
"following module:",
):
multi_module_api([model[0], model[2]])
|
from copy import deepcopy
from typing import List, Tuple
import torch
import torch.nn as nn
from torch.distributed._composable import _get_registry, contract
from torch.testing._internal.common_utils import run_tests, skipIfTorchDynamo, TestCase
class TestContract(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
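A condensed sketch of the multi-module usage exercised in the row above; tag_modules is a hypothetical no-op API defined only for illustration, and the equality checks mirror the test's assertions.

import torch.nn as nn
from torch.distributed._composable import contract

@contract()
def tag_modules(modules):
    # Per the test above, a contract-decorated API can accept a list of modules;
    # modules passed in the same call share one state object in the registry.
    return modules

model = nn.Sequential(*[nn.Linear(3, 3) for _ in range(3)])
tag_modules([model[0], model[1]])
tag_modules([model[2]])
assert tag_modules.state(model[0]) == tag_modules.state(model[1])
assert tag_modules.state(model[0]) != tag_modules.state(model[2])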
torch
|
test/distributed/_composable/test_replicate.py
|
__init__
|
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 50, bias=False)
self.fc3 = nn.Linear(50, 4, bias=False)
self.relu = nn.ReLU()
|
def __init__(self) -> None:
super().__init__()
self.fc1 = nn.Linear(2, 2)
self.fc2 = nn.Linear(2, 2)
self.fc3 = nn.Linear(2, 2)
|
import os
from copy import deepcopy
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch import nn
from torch.distributed._composable.replicate import replicate
from torch.testing._internal.common_distributed import MultiProcessTestCase
from torch.testing._internal.common_utils import run_tests
class Net(nn.Module):
|
import os
from copy import deepcopy
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch import nn
from torch.distributed._composable.fsdp import fully_shard
from torch.distributed._composable.replicate import replicate
from torch.distributed._tensor import DTensor
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import run_tests
class Net(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/_composable/test_replicate.py
|
forward
|
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
x = self.fc3(x)
return F.softmax(x, dim=1)
|
def forward(self, x):
return self.fc3(self.fc2(self.fc1(x)))
|
import os
from copy import deepcopy
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch import nn
from torch.distributed._composable.replicate import replicate
from torch.testing._internal.common_distributed import MultiProcessTestCase
from torch.testing._internal.common_utils import run_tests
class Net(nn.Module):
|
import os
from copy import deepcopy
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch import nn
from torch.distributed._composable.fsdp import fully_shard
from torch.distributed._composable.replicate import replicate
from torch.distributed._tensor import DTensor
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import run_tests
class Net(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/_composable/test_composability/test_pp_composability.py
|
test_manual_with_data_parallel
|
def test_manual_with_data_parallel(self, dp_type, ScheduleClass, use_new_runtime):
device = torch.device("cuda", self.device)
torch.cuda.set_device(self.device)
store = torch.distributed.FileStore(self.file_name, self.world_size)
torch.distributed.init_process_group(
backend="nccl",
store=store,
rank=self.rank,
world_size=self.world_size,
device_id=device,
)
device_mesh = init_device_mesh(
"cuda", mesh_shape=(2, 2), mesh_dim_names=("dp", "pp")
)
pp_group = device_mesh["pp"].get_group()
dp_mesh = device_mesh["dp"]
# create "entire model"
total_layers = 8
dim = 10
full_model = nn.ModuleList([MLPModule(dim) for _ in range(total_layers)])
ref_model = nn.Sequential(*copy.deepcopy(full_model))
ref_model.to(self.device)
# Prepare inputs
num_microbatches = 8
inputs = [
torch.rand((num_microbatches, dim), device=self.device)
for _ in range(dp_mesh.size())
]
input = inputs[dp_mesh.get_local_rank()]
input_mb = [[input[i].reshape((1, dim))] for i in range(num_microbatches)]
# dummy loss needed just to force backwards to run in schedule step
def loss_fn(y, target):
return y.sum()
# Get stage module i from the entire model
def get_stage_module(stage_idx, num_stages):
# divide the model (8 layers) by the number of stages
layers_per_stage = total_layers // num_stages
assert layers_per_stage * num_stages == total_layers
# return offset so validation code can match partial layer back to orig model
offset = stage_idx * layers_per_stage
partial_model = nn.Sequential(
*full_model[offset : (stage_idx + 1) * layers_per_stage]
)
partial_model.to(self.device)
return partial_model, offset
# Apply DP to stage module
def apply_dp(partial_model, dp_type):
if dp_type == "FSDP":
# apply FSDP
mp_policy = MixedPrecisionPolicy(
# TODO(whc) need to fix PP + FSDP-mixed-precision
# tracer for PP assumes f32 and is caught off guard when runtime FSDP interacts using bf16 inputs
# param_dtype=torch.bfloat16, reduce_dtype=torch.float32
param_dtype=torch.float32,
reduce_dtype=torch.float32,
)
fsdp_config = {"mesh": dp_mesh, "mp_policy": mp_policy}
for layer in partial_model.children():
fully_shard(
layer,
**fsdp_config,
reshard_after_forward=False,
)
dp_model = fully_shard(partial_model, **fsdp_config)
elif dp_type == "DDP":
dp_model = DDP(partial_model, process_group=dp_mesh.get_group())
else:
raise RuntimeError(f"unsupported dp type {dp_type}")
return dp_model
# Create pipeline stage
def build_stage(stage_idx, num_stages):
partial_model, offset = get_stage_module(stage_idx, num_stages)
dp_model = apply_dp(partial_model, dp_type)
stage = PipelineStage(
dp_model,
stage_idx,
num_stages,
self.device,
group=pp_group,
input_args=input_mb[0],
)
return stage, offset
# Attach to a schedule
if issubclass(ScheduleClass, PipelineScheduleSingle):
if use_new_runtime:
# Can't test PipelineScheduleSingle classes using new runtime
# return should still clean up this test instance correctly
torch.distributed.destroy_process_group()
return
pipeline_stage, offset = build_stage(pp_group.rank(), pp_group.size())
partial_models = [pipeline_stage.submod]
offsets = [offset]
pipeline_schedule = ScheduleClass(
pipeline_stage,
n_microbatches=num_microbatches,
loss_fn=loss_fn,
)
else:
n_virtual = 2
num_stages = pp_group.size() * n_virtual
stages = []
offsets = []
for i in range(n_virtual):
stage, offset = build_stage(pp_group.rank() + n_virtual * i, num_stages)
stages.append(stage)
offsets.append(offset)
partial_models = [pipeline_stage.submod for pipeline_stage in stages]
pipeline_schedule = ScheduleClass(
stages,
n_microbatches=num_microbatches,
loss_fn=loss_fn,
)
# Run
pipeline_schedule._step_microbatches(arg_mbs=input_mb, target_mbs=input_mb)
# Ref model runs on 2 different inputs, accumulating grads across them.
# this ensures that we detect if the FSDP reduce becomes a no-op.
# (in fsdp case, we use one of these inputs on each DP rank)
(ref_model(inputs[0]).sum()).backward()
(ref_model(inputs[1]).sum()).backward()
# simulate the built-in averaging done by FSDP
for p in ref_model.parameters():
p.grad /= dp_mesh.size()
# Validate that whichever weights we have locally match that part of our local/full ref model
# (we force FSDP's grads to be all-gathered (.full_tensor) to make it simpler)
ref_parameters = dict(ref_model.named_parameters())
if dp_type == "FSDP":
for partial_model, offset in zip(partial_models, offsets):
for name, p in partial_model.named_parameters():
parts = name.split(".")
parts[0] = str(int(parts[0]) + offset)
name = ".".join(parts)
ref_p = ref_parameters[name]
self.assertTrue(isinstance(p.grad, DTensor))
torch.testing.assert_close(
ref_p.grad, p.grad.full_tensor(), rtol=1e-5, atol=5e-5
)
elif dp_type == "DDP":
for partial_model, offset in zip(partial_models, offsets):
for name, p in partial_model.named_parameters():
parts = name.split(".")[1:] # remove the "module." prefix
parts[0] = str(int(parts[0]) + offset)
name = ".".join(parts)
ref_p = ref_parameters[name]
torch.testing.assert_close(ref_p.grad, p.grad, rtol=1e-5, atol=5e-5)
torch.distributed.destroy_process_group()
|
import copy
import os
import torch
import torch.nn as nn
from torch.distributed._composable.fsdp.fully_shard import (
fully_shard,
MixedPrecisionPolicy,
)
from torch.distributed._tensor import DTensor
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.pipelining import PipelineStage
from torch.distributed.pipelining.schedules import (
PipelineScheduleSingle,
Schedule1F1B,
ScheduleFlexibleInterleaved1F1B,
ScheduleGPipe,
ScheduleInterleaved1F1B,
ScheduleInterleavedZeroBubble,
ScheduleLoopedBFS,
)
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
skip_but_pass_in_sandcastle_if,
)
class ComposabilityTest(MultiProcessTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
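The stage partitioning in get_stage_module above is plain integer arithmetic; a self-contained sketch (stage_slice is an illustrative name) with a worked example:

def stage_slice(stage_idx, num_stages, total_layers):
    # Evenly split total_layers across num_stages; the assert mirrors the
    # test's requirement that the split is exact.
    layers_per_stage = total_layers // num_stages
    assert layers_per_stage * num_stages == total_layers
    offset = stage_idx * layers_per_stage
    return range(offset, offset + layers_per_stage)

# 8 layers over 4 virtual stages: stage 2 owns layers 4 and 5.
assert list(stage_slice(2, 4, 8)) == [4, 5]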
torch
|
test/distributed/_composable/test_composability/test_pp_composability.py
|
loss_fn
|
def loss_fn(y, target):
return y.sum()
# Get stage module i from the entire model
|
import copy
import os
import torch
import torch.nn as nn
from torch.distributed._composable.fsdp.fully_shard import (
fully_shard,
MixedPrecisionPolicy,
)
from torch.distributed._tensor import DTensor
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.pipelining import PipelineStage
from torch.distributed.pipelining.schedules import (
PipelineScheduleSingle,
Schedule1F1B,
ScheduleFlexibleInterleaved1F1B,
ScheduleGPipe,
ScheduleInterleaved1F1B,
ScheduleInterleavedZeroBubble,
ScheduleLoopedBFS,
)
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
skip_but_pass_in_sandcastle_if,
)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_composable/test_composability/test_pp_composability.py
|
get_stage_module
|
def get_stage_module(stage_idx, num_stages):
# divide the model (8 layers) by the number of stages
layers_per_stage = total_layers // num_stages
assert layers_per_stage * num_stages == total_layers
# return offset so validation code can match partial layer back to orig model
offset = stage_idx * layers_per_stage
partial_model = nn.Sequential(
*full_model[offset : (stage_idx + 1) * layers_per_stage]
)
partial_model.to(self.device)
return partial_model, offset
# Apply DP to stage module
|
import copy
import os
import torch
import torch.nn as nn
from torch.distributed._composable.fsdp.fully_shard import (
fully_shard,
MixedPrecisionPolicy,
)
from torch.distributed._tensor import DTensor
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.pipelining import PipelineStage
from torch.distributed.pipelining.schedules import (
PipelineScheduleSingle,
Schedule1F1B,
ScheduleFlexibleInterleaved1F1B,
ScheduleGPipe,
ScheduleInterleaved1F1B,
ScheduleInterleavedZeroBubble,
ScheduleLoopedBFS,
)
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
skip_but_pass_in_sandcastle_if,
)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_composable/test_composability/test_pp_composability.py
|
apply_dp
|
def apply_dp(partial_model, dp_type):
if dp_type == "FSDP":
# apply FSDP
mp_policy = MixedPrecisionPolicy(
# TODO(whc) need to fix PP + FSDP-mixed-precision
# tracer for PP assumes f32 and is caught off guard when runtime FSDP interacts using bf16 inputs
# param_dtype=torch.bfloat16, reduce_dtype=torch.float32
param_dtype=torch.float32,
reduce_dtype=torch.float32,
)
fsdp_config = {"mesh": dp_mesh, "mp_policy": mp_policy}
for layer in partial_model.children():
fully_shard(
layer,
**fsdp_config,
reshard_after_forward=False,
)
dp_model = fully_shard(partial_model, **fsdp_config)
elif dp_type == "DDP":
dp_model = DDP(partial_model, process_group=dp_mesh.get_group())
else:
raise RuntimeError(f"unsupported dp type {dp_type}")
return dp_model
# Create pipeline stage
|
import copy
import os
import torch
import torch.nn as nn
from torch.distributed._composable.fsdp.fully_shard import (
fully_shard,
MixedPrecisionPolicy,
)
from torch.distributed._tensor import DTensor
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.pipelining import PipelineStage
from torch.distributed.pipelining.schedules import (
PipelineScheduleSingle,
Schedule1F1B,
ScheduleFlexibleInterleaved1F1B,
ScheduleGPipe,
ScheduleInterleaved1F1B,
ScheduleInterleavedZeroBubble,
ScheduleLoopedBFS,
)
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
skip_but_pass_in_sandcastle_if,
)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_composable/test_composability/test_pp_composability.py
|
build_stage
|
def build_stage(stage_idx, num_stages):
partial_model, offset = get_stage_module(stage_idx, num_stages)
dp_model = apply_dp(partial_model, dp_type)
stage = PipelineStage(
dp_model,
stage_idx,
num_stages,
self.device,
group=pp_group,
input_args=input_mb[0],
)
return stage, offset
# Attach to a schedule
if issubclass(ScheduleClass, PipelineScheduleSingle):
if use_new_runtime:
# Can't test PipelineScheduleSingle classes using new runtime
# return should still clean up this test instance correctly
torch.distributed.destroy_process_group()
return
pipeline_stage, offset = build_stage(pp_group.rank(), pp_group.size())
partial_models = [pipeline_stage.submod]
offsets = [offset]
pipeline_schedule = ScheduleClass(
pipeline_stage,
n_microbatches=num_microbatches,
loss_fn=loss_fn,
)
else:
n_virtual = 2
num_stages = pp_group.size() * n_virtual
stages = []
offsets = []
for i in range(n_virtual):
stage, offset = build_stage(pp_group.rank() + n_virtual * i, num_stages)
stages.append(stage)
offsets.append(offset)
partial_models = [pipeline_stage.submod for pipeline_stage in stages]
pipeline_schedule = ScheduleClass(
stages,
n_microbatches=num_microbatches,
loss_fn=loss_fn,
)
# Run
pipeline_schedule._step_microbatches(arg_mbs=input_mb, target_mbs=input_mb)
# Ref model runs on 2 different inputs, accumulating grads across them.
# this ensures that we detect if the FSDP reduce becomes a no-op.
# (in fsdp case, we use one of these inputs on each DP rank)
(ref_model(inputs[0]).sum()).backward()
(ref_model(inputs[1]).sum()).backward()
# simulate the built-in averaging done by FSDP
for p in ref_model.parameters():
p.grad /= dp_mesh.size()
# Validate that whichever weights we have locally match that part of our local/full ref model
# (we force FSDP's grads to be all-gathered (.full_tensor) to make it simpler)
ref_parameters = dict(ref_model.named_parameters())
if dp_type == "FSDP":
for partial_model, offset in zip(partial_models, offsets):
for name, p in partial_model.named_parameters():
parts = name.split(".")
parts[0] = str(int(parts[0]) + offset)
name = ".".join(parts)
ref_p = ref_parameters[name]
self.assertTrue(isinstance(p.grad, DTensor))
torch.testing.assert_close(
ref_p.grad, p.grad.full_tensor(), rtol=1e-5, atol=5e-5
)
elif dp_type == "DDP":
for partial_model, offset in zip(partial_models, offsets):
for name, p in partial_model.named_parameters():
parts = name.split(".")[1:] # remove the "module." prefix
parts[0] = str(int(parts[0]) + offset)
name = ".".join(parts)
ref_p = ref_parameters[name]
torch.testing.assert_close(ref_p.grad, p.grad, rtol=1e-5, atol=5e-5)
torch.distributed.destroy_process_group()
|
import copy
import os
import torch
import torch.nn as nn
from torch.distributed._composable.fsdp.fully_shard import (
fully_shard,
MixedPrecisionPolicy,
)
from torch.distributed._tensor import DTensor
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.pipelining import PipelineStage
from torch.distributed.pipelining.schedules import (
PipelineScheduleSingle,
Schedule1F1B,
ScheduleFlexibleInterleaved1F1B,
ScheduleGPipe,
ScheduleInterleaved1F1B,
ScheduleInterleavedZeroBubble,
ScheduleLoopedBFS,
)
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
skip_but_pass_in_sandcastle_if,
)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_composable/test_compose.py
|
test_wrap_same_submodule
|
def test_wrap_same_submodule(self, use_reentrant: bool):
model = UnitModule(device=torch.device("cuda"))
base_model = copy.deepcopy(model)
test_model = copy.deepcopy(model)
# compose checkpoint and fully_shard
test_model.seq = checkpoint(test_model.seq, use_reentrant=use_reentrant)
test_model.seq = fully_shard(
test_model.seq,
policy=ModuleWrapPolicy({nn.Linear}),
)
self.run_subtests(
{
"base_model": [base_model],
"test_model": [test_model],
"inp_size": [torch.Size((2, 100))],
"inp_device": [torch.device("cuda")],
"grad_to_none": [True, False],
"use_same_inputs_across_ranks": [True],
},
self._test_parity,
)
|
def test_wrap_same_submodule(self):
model = UnitModule(device=torch.device("cuda"))
base_model = copy.deepcopy(model)
test_model = copy.deepcopy(model)
# compose checkpoint and fully_shard
test_model.seq = checkpoint(test_model.seq)
test_model.seq = fully_shard(
test_model.seq,
policy=ModuleWrapPolicy({nn.Linear}),
)
self.run_subtests(
{
"base_model": [base_model],
"test_model": [test_model],
"inp_size": [torch.Size((2, 100))],
"inp_device": [torch.device("cuda")],
"grad_to_none": [True, False],
"use_same_inputs_across_ranks": [True],
},
self._test_parity,
)
|
import copy
import sys
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.distributed._composable import checkpoint, fully_shard, replicate
from torch.distributed.fsdp.api import ShardingStrategy
from torch.distributed.fsdp.wrap import ModuleWrapPolicy
from torch.testing._internal.common_dist_composable import (
CompositeModel,
CompositeParamModel,
UnitModule,
)
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import FSDPTest
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
)
class TestFSDPCheckpoint(FSDPTest):
|
import copy
import sys
from typing import Dict
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.distributed._composable import checkpoint, fully_shard, replicate
from torch.distributed._shard.sharded_tensor import ShardedTensor
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, StateDictType
from torch.distributed.fsdp.api import MixedPrecision, ShardingStrategy
from torch.distributed.fsdp.wrap import ModuleWrapPolicy
from torch.testing._internal.common_dist_composable import (
CompositeModel,
CompositeParamModel,
UnitModule,
)
from torch.testing._internal.common_distributed import (
SaveForwardInputsModel,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_fsdp import FSDPTest
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
)
class TestFSDPCheckpoint(FSDPTest):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/_shard/sharded_tensor/ops/test_init.py
|
test_init_sharded_tensor_with_kaiming_uniform
|
def test_init_sharded_tensor_with_kaiming_uniform(self):
""" Test torch.nn.init.kaiming_uniform_(ShardedTensor, a, mode, nonlinearit) """
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
h, w = 8, 2
expected_h = 2
expected_device = torch.device(f"cuda:{self.rank}")
a, mode, nonlinearity = 0, 'fan_in', 'leaky_relu'
seed = 1234
dtype = torch.double
st = sharded_tensor.empty(spec, h, w, dtype=dtype)
self.assertEqual(1, len(st.local_shards()))
# Clone local tensor to ensure torch.nn.init starts from the same input
local_tensor_clone = torch.clone(st.local_shards()[0].tensor)
torch.manual_seed(seed)
torch.nn.init.kaiming_uniform_(st, a=a, mode=mode, nonlinearity=nonlinearity)
torch.manual_seed(seed)
torch.nn.init.kaiming_uniform_(local_tensor_clone, a=a, mode=mode, nonlinearity=nonlinearity)
self.assertEqual(local_tensor_clone, st.local_shards()[0].tensor)
|
def test_init_sharded_tensor_with_kaiming_uniform(self):
"""Test torch.nn.init.kaiming_uniform_(ShardedTensor, a, mode, nonlinearit)"""
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
h, w = 8, 2
expected_h = 2
expected_device = torch.device(f"cuda:{self.rank}")
a, mode, nonlinearity = 0, "fan_in", "leaky_relu"
seed = 1234
dtype = torch.double
st = sharded_tensor.empty(spec, h, w, dtype=dtype)
self.assertEqual(1, len(st.local_shards()))
# Clone local tensor to ensure torch.nn.init starts from the same input
local_tensor_clone = torch.clone(st.local_shards()[0].tensor)
torch.manual_seed(seed)
torch.nn.init.kaiming_uniform_(st, a=a, mode=mode, nonlinearity=nonlinearity)
torch.manual_seed(seed)
torch.nn.init.kaiming_uniform_(
local_tensor_clone, a=a, mode=mode, nonlinearity=nonlinearity
)
self.assertEqual(local_tensor_clone, st.local_shards()[0].tensor)
|
import sys
import torch
from torch.distributed._shard import sharded_tensor
from torch.distributed._shard.sharding_spec import (
ChunkShardingSpec,
)
from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.distributed._shard.sharded_tensor import (
ShardedTensorTestBase,
with_comms,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
run_tests,
)
class TestShardedTensorNNInit(ShardedTensorTestBase):
|
import sys
import torch
from torch.distributed._shard import sharded_tensor
from torch.distributed._shard.sharding_spec import ChunkShardingSpec
from torch.testing._internal.common_distributed import requires_nccl, skip_if_lt_x_gpu
from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
from torch.testing._internal.distributed._shard.sharded_tensor import (
ShardedTensorTestBase,
with_comms,
)
class TestShardedTensorNNInit(ShardedTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
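The test above relies on torch.nn.init being deterministic under the same seed; a tiny standalone illustration on plain tensors (no sharding involved):

import torch

torch.manual_seed(1234)
t1 = torch.empty(8, 2, dtype=torch.double)
torch.nn.init.kaiming_uniform_(t1, a=0, mode="fan_in", nonlinearity="leaky_relu")

torch.manual_seed(1234)
t2 = torch.empty(8, 2, dtype=torch.double)
torch.nn.init.kaiming_uniform_(t2, a=0, mode="fan_in", nonlinearity="leaky_relu")

assert torch.equal(t1, t2)  # same seed, same init -> identical values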
torch
|
test/distributed/_shard/sharded_tensor/ops/test_linear.py
|
test_sharded_linear_errors
|
def test_sharded_linear_errors(self):
for spec in generate_chunk_sharding_specs_for_test(0):
fc1 = torch.nn.Linear(10, 10).cuda(self.rank)
shard_parameter(fc1, "weight", spec)
shard_parameter(fc1, "bias", spec)
with self.assertRaisesRegex(TypeError, 'bias needs to be torch.Tensor'):
fc1(torch.rand(10, 10).cuda(self.rank))
fc2 = torch.nn.Linear(10, 10).cuda(self.rank)
shard_parameter(fc2, "weight", spec)
with self.assertRaisesRegex(ValueError, 'Input needs to have at least 1 dim'):
fc2(torch.tensor(1).cuda(self.rank))
fc3 = torch.nn.Linear(10, 10).cuda(self.rank)
fc3.weight = torch.nn.Parameter(torch.rand(10, 10, 10).cuda(self.rank))
shard_parameter(fc3, "weight", spec)
with self.assertRaisesRegex(ValueError, 'Weight needs to have exactly 2 dims'):
fc3(torch.rand(10, 10).cuda(self.rank))
fc4 = torch.nn.Linear(10, 10).cuda(self.rank)
fc4.bias = torch.nn.Parameter(torch.rand(10, 10).cuda(self.rank))
shard_parameter(fc4, "weight", spec)
with self.assertRaisesRegex(ValueError, 'Bias needs to have exactly 1 dim'):
fc4(torch.rand(10, 10).cuda(self.rank))
fc5 = torch.nn.Linear(7, 10).cuda(self.rank)
shard_parameter(fc5, "weight", spec)
with self.assertRaisesRegex(ValueError, 'Input dim: 13 does not match appropriate weight dim: 7'):
fc5(torch.rand(20, 10, 13).cuda(self.rank))
fc6 = torch.nn.Linear(10, 10).cuda(self.rank)
del fc6.weight
enumerable_spec = EnumerableShardingSpec([
ShardMetadata(
shard_offsets=[0, 0],
shard_sizes=[5, 5],
placement="rank:0/cuda:0",
),
ShardMetadata(
shard_offsets=[0, 5],
shard_sizes=[5, 5],
placement="rank:1/cuda:1",
),
ShardMetadata(
shard_offsets=[5, 0],
shard_sizes=[5, 5],
placement="rank:2/cuda:2",
),
ShardMetadata(
shard_offsets=[5, 5],
shard_sizes=[5, 5],
placement="rank:3/cuda:3",
)
])
fc6.weight = empty(enumerable_spec, 10, 10)
# Sharded Tensor metadata has parenthesis imbalance issue when using re.compile
error_msg = r"torch function 'linear', with args: (?s).* "
r"and kwargs: None not supported for ShardedTensor!"
with self.assertRaisesRegex(RuntimeError, error_msg):
fc6(torch.rand(10, 10).cuda(self.rank))
fc7 = torch.nn.Linear(10, 80).cuda(self.rank)
multiple_local_shard_spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:2/cuda:2",
"rank:3/cuda:3",
"rank:3/cuda:3",
],
)
del fc7.weight
fc7.weight = empty(multiple_local_shard_spec, 80, 10)
with self.assertRaisesRegex(ValueError, 'Only one local shard supported!'):
fc7(torch.rand(10, 10).cuda(self.rank))
|
import copy
import sys
import torch
import torch.distributed as dist
from torch.distributed._shard.api import (
shard_parameter,
_collect_local_shard,
_reshard_output,
)
from torch.distributed._shard.sharded_optim import (
ShardedOptimizer,
)
from torch.distributed._shard.sharded_tensor import (
empty,
)
from torch.distributed._shard.sharding_spec import (
ChunkShardingSpec,
EnumerableShardingSpec,
ShardMetadata,
)
from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
run_tests,
)
from torch.testing._internal.distributed._shard.sharded_tensor import (
TEST_GPU_NUM,
ShardedTensorTestBase,
with_comms,
)
from torch.testing._internal.distributed._shard.sharded_tensor._test_ops_common import (
clone_module_parameter,
generate_chunk_sharding_specs_for_test,
generate_local_weight_sharding_params_for_test,
)
class TestShardedTensorOpsLinear(ShardedTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/_composable/test_replicate.py
|
_init_pg
|
def _init_pg(self):
dist.init_process_group(
backend="gloo",
rank=self.rank,
world_size=self.world_size,
store=dist.FileStore(self.file_name, self.world_size),
)
|
import os
from copy import deepcopy
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch import nn
from torch.distributed._composable.fsdp import fully_shard
from torch.distributed._composable.replicate import replicate
from torch.distributed._tensor import DTensor
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import run_tests
class ReplicateStateDictTest(MultiProcessTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
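A runnable single-process sketch of the FileStore + gloo initialization that _init_pg wraps; world_size is 1 here only so the snippet can run outside the multi-process test harness.

import tempfile
import torch.distributed as dist

file_name = tempfile.NamedTemporaryFile(delete=False).name  # shared rendezvous file
store = dist.FileStore(file_name, 1)
dist.init_process_group(backend="gloo", rank=0, world_size=1, store=store)
assert dist.get_world_size() == 1
dist.destroy_process_group()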
torch
|
test/distributed/_composable/test_replicate.py
|
test_replicate_single_module_save_load
|
def test_replicate_single_module_save_load(self):
"""
Tests that replicate() on a single module state_dict
matches local module state_dict.
"""
model = Net()
replicate_model = replicate(deepcopy(model))
local_sd = model.state_dict()
ddp_sd = replicate_model.state_dict()
self._check_state_dict_parity(local_sd, ddp_sd)
|
def test_replicate_single_module_save_load(self):
"""
Tests that replicate() on a single module state_dict
matches local module state_dict.
"""
self._init_pg()
model = Net()
replicate_model = replicate(deepcopy(model))
local_sd = model.state_dict()
ddp_sd = replicate_model.state_dict()
self._check_state_dict_parity(local_sd, ddp_sd)
|
import os
from copy import deepcopy
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch import nn
from torch.distributed._composable.replicate import replicate
from torch.testing._internal.common_distributed import MultiProcessTestCase
from torch.testing._internal.common_utils import run_tests
class ReplicateStateDictTest(MultiProcessTestCase):
|
import os
from copy import deepcopy
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch import nn
from torch.distributed._composable.fsdp import fully_shard
from torch.distributed._composable.replicate import replicate
from torch.distributed._tensor import DTensor
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import run_tests
class ReplicateStateDictTest(MultiProcessTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/_composable/test_replicate.py
|
test_replicate_non_root_multiple_save_load
|
def test_replicate_non_root_multiple_save_load(self):
"""
Tests that replicate() on multiple submodules matches
local module state_dict.
"""
model = Net()
replicate_model = deepcopy(model)
replicate(replicate_model.fc1)
replicate(replicate_model.fc2)
replicate(replicate_model.fc3)
local_sd = model.state_dict()
ddp_sd = replicate_model.state_dict()
self._check_state_dict_parity(local_sd, ddp_sd)
|
def test_replicate_non_root_multiple_save_load(self):
"""
Tests that replicate() on multiple submodules matches
local module state_dict.
"""
self._init_pg()
model = Net()
replicate_model = deepcopy(model)
replicate(replicate_model.fc1)
replicate(replicate_model.fc2)
replicate(replicate_model.fc3)
local_sd = model.state_dict()
ddp_sd = replicate_model.state_dict()
self._check_state_dict_parity(local_sd, ddp_sd)
|
import os
from copy import deepcopy
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch import nn
from torch.distributed._composable.replicate import replicate
from torch.testing._internal.common_distributed import MultiProcessTestCase
from torch.testing._internal.common_utils import run_tests
class ReplicateStateDictTest(MultiProcessTestCase):
|
import os
from copy import deepcopy
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch import nn
from torch.distributed._composable.fsdp import fully_shard
from torch.distributed._composable.replicate import replicate
from torch.distributed._tensor import DTensor
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import run_tests
class ReplicateStateDictTest(MultiProcessTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/_composable/test_replicate.py
|
_init_pg
|
dist.init_process_group(
backend="gloo",
rank=self.rank,
world_size=self.world_size,
store=dist.FileStore(self.file_name, self.world_size),
)
|
def _init_pg(self):
dist.init_process_group(
backend="gloo",
rank=self.rank,
world_size=self.world_size,
store=dist.FileStore(self.file_name, self.world_size),
)
|
import os
from copy import deepcopy
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch import nn
from torch.distributed._composable.fsdp import fully_shard
from torch.distributed._composable.replicate import replicate
from torch.distributed._tensor import DTensor
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import run_tests
class ReplicateStateDictTest(MultiProcessTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/distributed/_composable/test_replicate.py
|
_compare_module
|
def _compare_module(self, mod, replicate_mod):
dist.init_process_group(
backend="gloo",
rank=self.rank,
world_size=self.world_size,
store=dist.FileStore(self.file_name, self.world_size),
)
local_batch_size = 1
global_batch_size = self.world_size * local_batch_size
input = torch.randn(global_batch_size, 2)
target = torch.randn(global_batch_size, 4)
def step_model(model, input, target):
model.train()
output = model(input)
loss = F.mse_loss(output, target.to(output.device))
loss.backward()
for param in model.parameters():
with torch.no_grad():
param -= param.grad
param.grad = None
for iteration in range(2):
step_model(mod, input, target)
step_model(
replicate_mod,
input[
self.rank * local_batch_size : (self.rank + 1) * local_batch_size
],
target[
self.rank * local_batch_size : (self.rank + 1) * local_batch_size
],
)
self.assertEqual(
len(list(mod.parameters())),
len(list(replicate_mod.parameters())),
)
for i, j in zip(mod.parameters(), replicate_mod.parameters()):
self.assertEqual(i, j, rtol=1.3e-06, atol=5e-5)
# Shuffle the input so that DDP input is different
torch.manual_seed(iteration)
input = input[torch.randperm(global_batch_size)]
|
def _compare_module(self, mod, replicate_mod):
local_batch_size = 1
global_batch_size = self.world_size * local_batch_size
input = torch.randn(global_batch_size, 2)
target = torch.randn(global_batch_size, 2)
def step_model(model, input, target):
model.train()
output = model(input)
loss = F.mse_loss(output, target.to(output.device))
loss.backward()
for param in model.parameters():
with torch.no_grad():
param -= param.grad
param.grad = None
for iteration in range(2):
step_model(mod, input, target)
step_model(
replicate_mod,
input[
self.rank * local_batch_size : (self.rank + 1) * local_batch_size
],
target[
self.rank * local_batch_size : (self.rank + 1) * local_batch_size
],
)
self.assertEqual(
len(list(mod.parameters())),
len(list(replicate_mod.parameters())),
)
for i, j in zip(mod.parameters(), replicate_mod.parameters()):
self.assertEqual(i, j, rtol=1.3e-06, atol=5e-5)
# Shuffle the input so that DDP input is different
torch.manual_seed(iteration)
input = input[torch.randperm(global_batch_size)]
|
import os
from copy import deepcopy
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch import nn
from torch.distributed._composable.replicate import replicate
from torch.testing._internal.common_distributed import MultiProcessTestCase
from torch.testing._internal.common_utils import run_tests
class ReplicateTest(MultiProcessTestCase):
|
import os
from copy import deepcopy
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch import nn
from torch.distributed._composable.fsdp import fully_shard
from torch.distributed._composable.replicate import replicate
from torch.distributed._tensor import DTensor
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import run_tests
class ReplicateTest(MultiProcessTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/_composable/test_compose.py
|
test_checkpoint_fully_shard_cast_forward_inputs
|
def test_checkpoint_fully_shard_cast_forward_inputs(self):
self.run_subtests(
{
"checkpoint_strict_submodule": [False, True],
},
self._test_checkpoint_fully_shard_cast_forward_inputs,
)
|
import copy
import sys
from typing import Dict
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.distributed._composable import checkpoint, fully_shard, replicate
from torch.distributed._shard.sharded_tensor import ShardedTensor
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, StateDictType
from torch.distributed.fsdp.api import MixedPrecision, ShardingStrategy
from torch.distributed.fsdp.wrap import ModuleWrapPolicy
from torch.testing._internal.common_dist_composable import (
CompositeModel,
CompositeParamModel,
UnitModule,
)
from torch.testing._internal.common_distributed import (
SaveForwardInputsModel,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_fsdp import FSDPTest
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
)
class TestFSDPCheckpoint(FSDPTest):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
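run_subtests (a helper on the FSDPTest base class used above) is driven by a dict of option lists; conceptually it iterates their cartesian product and calls the test body once per combination. A rough sketch of that idea only, not the actual helper, whose exact behavior may differ:

import itertools

def run_subtests_sketch(subtest_config, test_fn, *args, **kwargs):
    keys = list(subtest_config)
    for values in itertools.product(*(subtest_config[k] for k in keys)):
        # e.g. checkpoint_strict_submodule=False, then True
        test_fn(*args, **dict(zip(keys, values)), **kwargs)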
torch
|
test/distributed/_composable/test_compose.py
|
test_state_dict_fsdp_submodules
|
instantiate_parametrized_tests(TestFSDPCheckpoint)
if __name__ == "__main__":
run_tests()
|
def test_state_dict_fsdp_submodules(self):
model = CompositeModel(device=torch.device("cuda"))
full_shard_args = {"strategy": ShardingStrategy.FULL_SHARD}
no_shard_args = {"strategy": ShardingStrategy.NO_SHARD}
model.u1 = fully_shard(model.u1, **full_shard_args)
model.u2 = fully_shard(model.u2, **no_shard_args)
FSDP.set_state_dict_type(
model,
StateDictType.SHARDED_STATE_DICT,
)
state_dict = model.state_dict()
for fqn, tensor in state_dict.items():
if "u1" in fqn:
self.assertIsInstance(tensor, ShardedTensor)
elif "u2" in fqn:
self.assertIsInstance(tensor, torch.Tensor)
# Ensure that get_state_dict_type can still correctly get the settings.
_ = FSDP.get_state_dict_type(model)
|
import copy
import sys
from typing import Dict
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.distributed._composable import checkpoint, fully_shard, replicate
from torch.distributed._shard.sharded_tensor import ShardedTensor
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, StateDictType
from torch.distributed.fsdp.api import MixedPrecision, ShardingStrategy
from torch.distributed.fsdp.wrap import ModuleWrapPolicy
from torch.testing._internal.common_dist_composable import (
CompositeModel,
CompositeParamModel,
UnitModule,
)
from torch.testing._internal.common_distributed import (
SaveForwardInputsModel,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_fsdp import FSDPTest
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
)
class TestFSDPCheckpoint(FSDPTest):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/distributed/_composable/test_contract.py
|
__init__
|
def __init__(self):
super().__init__()
self.seq1 = nn.Sequential(*[nn.Linear(10, 10) for _ in range(2)])
self.seq2 = nn.Sequential(*[nn.Linear(10, 10) for _ in range(2)])
self.p = nn.Parameter(torch.randn(10, 10), requires_grad=True)
self.b = torch.zeros(1) # buffer
|
def __init__(self) -> None:
super().__init__()
self.seq1 = nn.Sequential(*[nn.Linear(10, 10) for _ in range(2)])
self.seq2 = nn.Sequential(*[nn.Linear(10, 10) for _ in range(2)])
self.p = nn.Parameter(torch.randn(10, 10), requires_grad=True)
self.b = torch.zeros(1) # buffer
|
from copy import deepcopy
from typing import Tuple
import torch
import torch.nn as nn
from torch.distributed._composable import _get_registry, contract
from torch.testing._internal.common_utils import run_tests, skipIfTorchDynamo, TestCase
class ToyModel(nn.Module):
|
from copy import deepcopy
from typing import List, Tuple
import torch
import torch.nn as nn
from torch.distributed._composable import _get_registry, contract
from torch.testing._internal.common_utils import run_tests, skipIfTorchDynamo, TestCase
class ToyModel(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/_shard/sharded_tensor/test_logger.py
|
test_get_or_create_logger
|
def test_get_or_create_logger(self):
logger = _get_or_create_logger()
self.assertIsNotNone(logger)
self.assertEqual(1, len(logger.handlers))
self.assertIsInstance(logger.handlers[0], logging.NullHandler)
|
import logging
from torch.distributed._shard.sharded_tensor.logger import _get_or_create_logger
from torch.testing._internal.common_utils import run_tests, TestCase
class ShardingSpecLoggerTest(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_shard/sharded_tensor/test_megatron_prototype.py
|
assertEdistNorm
|
def assertEdistNorm(self, t1, t2):
"""
Use a normalized Euclidean distance measure to validate that two tensors
are close, since comparing each element individually is not a good
measure when the majority of elements are similar and only a few
elements are slightly off.
"""
dist = torch.sqrt(((t1 - t2) ** 2).sum() / t1.numel())
self.assertTrue(dist.item() <= 0.5)
|
import copy
import sys
import torch
import torch.distributed as dist
from torch.distributed._shard.sharded_optim import (
ShardedOptimizer,
)
from torch.distributed._shard.api import (
shard_parameter,
_reshard_output,
_collect_local_shard
)
from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
run_tests,
)
from torch.testing._internal.distributed._shard.sharded_tensor import (
TEST_GPU_NUM,
ShardedTensorTestBase,
with_comms,
)
from torch.testing._internal.distributed._shard.sharded_tensor._test_ops_common import (
clone_module_parameter,
generate_chunk_sharding_specs_for_test,
generate_local_weight_sharding_params_for_test,
)
from torch.testing._internal.distributed._shard.test_common import SimpleMegatronLM
class TestShardedTensorMegatronLinear(ShardedTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
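A standalone sketch of the normalized Euclidean distance used by assertEdistNorm above, with a quick check against the 0.5 tolerance the test applies; normalized_edist is an illustrative name.

import torch

def normalized_edist(t1, t2):
    # Root-mean-square difference: tolerant when most elements agree and only
    # a few are slightly off, unlike element-wise equality checks.
    return torch.sqrt(((t1 - t2) ** 2).sum() / t1.numel()).item()

x = torch.zeros(1000)
y = x.clone()
y[0] = 3.0                              # a single outlier element
assert normalized_edist(x, y) <= 0.5    # within the test's tolerance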
torch
|
test/distributed/_shard/sharded_tensor/test_megatron_prototype.py
|
_weight_override
|
def _weight_override(module_dst, module_src):
module_dst.fc1.weight = clone_module_parameter(module_src.fc1, "weight")
module_dst.fc1.bias = clone_module_parameter(module_src.fc1, "bias")
module_dst.fc2.weight = clone_module_parameter(module_src.fc2, "weight")
module_dst.fc2.bias = clone_module_parameter(module_src.fc2, "bias")
|
import copy
import sys
import torch
import torch.distributed as dist
from torch.distributed._shard.sharded_optim import (
ShardedOptimizer,
)
from torch.distributed._shard.api import (
shard_parameter,
_reshard_output,
_collect_local_shard
)
from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
run_tests,
)
from torch.testing._internal.distributed._shard.sharded_tensor import (
TEST_GPU_NUM,
ShardedTensorTestBase,
with_comms,
)
from torch.testing._internal.distributed._shard.sharded_tensor._test_ops_common import (
clone_module_parameter,
generate_chunk_sharding_specs_for_test,
generate_local_weight_sharding_params_for_test,
)
from torch.testing._internal.distributed._shard.test_common import SimpleMegatronLM
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/_shard/sharded_tensor/test_megatron_prototype.py
|
_shard_parameter
|
def _shard_parameter(module, spec):
shard_parameter(module.fc1, "weight", spec[0])
shard_parameter(module.fc2, "weight", spec[1])
# Use same seed.
torch.manual_seed(0)
local_megatron_lm = SimpleMegatronLM(linear_size, rank=self.rank, dtype=dtype)
sharded_megatron_lm = SimpleMegatronLM(linear_size, dtype=dtype)
_weight_override(sharded_megatron_lm, local_megatron_lm)
# Shard the parameter. First col-wise sharding and then row-wise
_shard_parameter(sharded_megatron_lm, spec)
# Setup resharding of output.
reshard_spec = copy.deepcopy(spec[1])
reshard_spec.placements.sort(key=lambda placement: placement.rank())
reshard_spec.dim = 0
sharded_megatron_lm = _collect_local_shard(
_reshard_output(sharded_megatron_lm, reshard_spec)
)
torch.manual_seed(self.rank) # inputs different on each rank
inp = torch.rand(*input_size, requires_grad=True, device=self.rank, dtype=dtype)
# Run local computation
local_output = local_megatron_lm(inp)
# Compute loss and run backward pass.
local_output.sum().backward()
# Save and reset input grads.
local_input_grad = inp.grad
self.assertIsNotNone(inp.grad)
inp.grad = None
# Run sharded computation
sharded_output = sharded_megatron_lm(inp)
# Verify local and sharded results
self.assertEqual(local_output, sharded_output, atol=1e-3, rtol=1e-6)
sharded_output.sum().backward()
sharded_input_grad = inp.grad
self.assertIsNotNone(inp.grad)
# Verify sharded and local grads.
self.assertEqual(local_input_grad, sharded_input_grad, atol=1e-3, rtol=1e-6)
(
local_weight_grad_fc1,
local_weight_grad_fc2,
) = local_megatron_lm.get_weight_grads()
local_bias_grad_fc1, local_bias_grad_fc2 = local_megatron_lm.get_bias_grads()
        # Verify that the weights of both layers and the biases in the sharded linear have non-None grads.
(
sharded_weight_fc1,
sharded_weight_fc2,
) = sharded_megatron_lm.get_weights()
bias_grad_fc1, bias_grad_fc2 = sharded_megatron_lm.get_bias_grads()
self.assertNotEqual(sharded_weight_fc1.grad, None)
self.assertNotEqual(sharded_weight_fc2.grad, None)
self.assertNotEqual(bias_grad_fc1, None)
self.assertNotEqual(bias_grad_fc2, None)
        # All-reduce the local grads across ranks, then narrow the weight grads to this rank's shard for comparison.
dist.all_reduce(local_weight_grad_fc1)
dist.all_reduce(local_weight_grad_fc2)
dist.all_reduce(local_bias_grad_fc1)
dist.all_reduce(local_bias_grad_fc2)
local_weight_fc1, local_weight_fc2 = local_megatron_lm.get_weights()
(
start_pos_fc1,
chunk_size_fc1,
) = generate_local_weight_sharding_params_for_test(
local_weight_fc1, 0, TEST_GPU_NUM, spec[0], self.rank
)
local_grad_narrowed_fc1 = local_weight_grad_fc1.narrow(
0, start_pos_fc1, chunk_size_fc1
)
(
start_pos_fc2,
chunk_size_fc2,
) = generate_local_weight_sharding_params_for_test(
local_weight_fc2, 1, TEST_GPU_NUM, spec[1], self.rank
)
local_grad_narrowed_fc2 = local_weight_grad_fc2.narrow(
1, start_pos_fc2, chunk_size_fc2
)
# Test backward gradient calculation.
self.assertEdistNorm(sharded_weight_fc1.grad, local_grad_narrowed_fc1)
self.assertEdistNorm(sharded_weight_fc2.grad, local_grad_narrowed_fc2)
self.assertEdistNorm(bias_grad_fc1, local_bias_grad_fc1)
self.assertEdistNorm(bias_grad_fc2, local_bias_grad_fc2)
# Test optimizer.
bias_fc1, bias_fc2 = sharded_megatron_lm.get_biases()
local_bias_fc1, local_bias_fc2 = local_megatron_lm.get_biases()
self.assertEdistNorm(bias_fc1, local_bias_fc1)
self.assertEdistNorm(bias_fc2, local_bias_fc2)
self.assertEdistNorm(bias_fc1.grad, local_bias_fc1.grad)
self.assertEdistNorm(bias_fc2.grad, local_bias_fc2.grad)
previous_sharded_weight_fc1 = sharded_weight_fc1.clone()
previous_sharded_weight_fc2 = sharded_weight_fc2.clone()
previous_bias_fc1 = bias_fc1.clone()
previous_bias_fc2 = bias_fc2.clone()
optim = torch.optim.SGD(local_megatron_lm.parameters(), lr=0.1)
optim.step()
sharded_optim = ShardedOptimizer(
dict(sharded_megatron_lm.named_parameters()),
torch.optim.SGD,
lr=0.1,
)
sharded_optim.step()
local_weight_fc1_narrowed = local_weight_fc1.narrow(
0, start_pos_fc1, chunk_size_fc1
)
local_weight_fc2_narrowed = local_weight_fc2.narrow(
1, start_pos_fc2, chunk_size_fc2
)
# Test weight value after optimizer.
self.assertEqual(sharded_weight_fc1.size(), local_weight_fc1_narrowed.size())
self.assertEqual(sharded_weight_fc2.size(), local_weight_fc2_narrowed.size())
self.assertNotEqual(previous_sharded_weight_fc1, sharded_weight_fc1)
self.assertNotEqual(previous_sharded_weight_fc2, sharded_weight_fc2)
self.assertEdistNorm(sharded_weight_fc1, local_weight_fc1_narrowed)
self.assertEdistNorm(sharded_weight_fc2, local_weight_fc2_narrowed)
# Test bias value after optimizer.
local_bias_fc1, local_bias_fc2 = local_megatron_lm.get_biases()
self.assertNotEqual(previous_bias_fc1, bias_fc1)
self.assertEdistNorm(bias_fc1, local_bias_fc1)
self.assertNotEqual(previous_bias_fc2, bias_fc2)
self.assertEdistNorm(bias_fc2, local_bias_fc2)
|
import copy
import sys
import torch
import torch.distributed as dist
from torch.distributed._shard.sharded_optim import (
ShardedOptimizer,
)
from torch.distributed._shard.api import (
shard_parameter,
_reshard_output,
_collect_local_shard
)
from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
run_tests,
)
from torch.testing._internal.distributed._shard.sharded_tensor import (
TEST_GPU_NUM,
ShardedTensorTestBase,
with_comms,
)
from torch.testing._internal.distributed._shard.sharded_tensor._test_ops_common import (
clone_module_parameter,
generate_chunk_sharding_specs_for_test,
generate_local_weight_sharding_params_for_test,
)
from torch.testing._internal.distributed._shard.test_common import SimpleMegatronLM
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/_shard/sharded_tensor/test_megatron_prototype.py
|
test_megatron_two_layer_prototype
|
def test_megatron_two_layer_prototype(self):
colwise_sharding_spec = generate_chunk_sharding_specs_for_test(0)
rowwise_sharding_spec = generate_chunk_sharding_specs_for_test(1)
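        # Pair each col-wise spec with a row-wise one: fc1's weight is sharded along dim 0
        # (column-parallel) and fc2's along dim 1 (row-parallel), Megatron-style.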
for spec in zip(colwise_sharding_spec, rowwise_sharding_spec):
self._run_megatron_linear(spec, [22, 17], [[17, 12], [12, 29]], torch.float16)
self._run_megatron_linear(spec, [28, 21], [[21, 11], [11, 29]], torch.float32)
self._run_megatron_linear(spec, [37, 23], [[23, 13], [13, 24]], torch.float64)
self._run_megatron_linear(spec, [24, 15], [[15, 14], [14, 20]], torch.float16)
# Test multiple input dims
self._run_megatron_linear(spec, [10, 22, 17], [[17, 12], [12, 29]], torch.float32)
self._run_megatron_linear(spec, [13, 28, 21], [[21, 11], [11, 29]], torch.float16)
self._run_megatron_linear(spec, [27, 37, 23], [[23, 13], [13, 24]], torch.float32)
self._run_megatron_linear(spec, [100, 24, 15], [[15, 14], [14, 20]], torch.float64)
# Test single input dim
self._run_megatron_linear(spec, [17], [[17, 12], [12, 29]], torch.float16)
self._run_megatron_linear(spec, [21], [[21, 11], [11, 29]], torch.float32)
self._run_megatron_linear(spec, [23], [[23, 13], [13, 24]], torch.float64)
self._run_megatron_linear(spec, [15], [[15, 14], [14, 20]], torch.float16)
|
import copy
import sys
import torch
import torch.distributed as dist
from torch.distributed._shard.sharded_optim import (
ShardedOptimizer,
)
from torch.distributed._shard.api import (
shard_parameter,
_reshard_output,
_collect_local_shard
)
from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
run_tests,
)
from torch.testing._internal.distributed._shard.sharded_tensor import (
TEST_GPU_NUM,
ShardedTensorTestBase,
with_comms,
)
from torch.testing._internal.distributed._shard.sharded_tensor._test_ops_common import (
clone_module_parameter,
generate_chunk_sharding_specs_for_test,
generate_local_weight_sharding_params_for_test,
)
from torch.testing._internal.distributed._shard.test_common import SimpleMegatronLM
class TestShardedTensorMegatronLinear(ShardedTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/_shard/test_replicated_tensor.py
|
test_getitem
|
def test_getitem(self):
local_tensor = torch.rand(3, 3, device=self.rank)
replicated_tensor = ReplicatedTensor(local_tensor)
replicated_tensor_view = replicated_tensor[0]
local_tensor_view = local_tensor[0]
self.assertIsInstance(replicated_tensor_view, ReplicatedTensor)
self.assertEqual(local_tensor_view, replicated_tensor_view)
|
import io
import torch
import torch.distributed._shard.sharded_tensor as sharded_tensor
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed._shard import _shard_tensor
from torch.distributed._shard.replicated_tensor import ReplicatedTensor
from torch.distributed._shard.sharding_spec import ChunkShardingSpec
from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.distributed._shard.sharded_tensor import (
ShardedTensorTestBase,
with_comms,
)
from torch.testing._internal.distributed._shard.sharded_tensor._test_ops_common import (
gen_binary_op_func
)
from torch.testing._internal.distributed._shard.sharded_tensor import TEST_GPU_NUM
class TestReplicatedTensor(ShardedTensorTestBase):
from torch.nn.parallel._replicated_tensor_ddp_utils import _ddp_replicated_tensor
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/_tensor/debug/test_comm_mode.py
|
__init__
|
def __init__(self, device):
super().__init__()
self.model = MLPModule(device=device)
|
import torch
import torch.distributed as dist
import torch.distributed._functional_collectives as funcol
import torch.nn as nn
from torch.distributed._tensor import DeviceMesh, DTensor
from torch.distributed._tensor.placement_types import Shard
from torch.distributed.tensor.debug import CommDebugMode
from torch.testing._internal.common_distributed import requires_nccl
from torch.testing._internal.common_utils import run_tests, TestCase
from torch.testing._internal.distributed._tensor.common_dtensor import MLPModule
from torch.testing._internal.distributed.fake_pg import FakeStore
c10d_functional = torch.ops.c10d_functional
c10d_ops = torch.ops.c10d
class WrapperModel(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tensor/debug/test_comm_mode.py
|
forward
|
def forward(self, x):
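        # issue one all_gather, one reduce_scatter, and one all_reduce so CommDebugMode
        # has a known set of collectives to count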
x = funcol.all_gather_tensor(x, 0, world_pg)
x = funcol.reduce_scatter_tensor(x, "sum", 0, world_pg)
out = self.model(x)
return funcol.all_reduce(out, "sum", world_pg)
|
import torch
import torch.distributed as dist
import torch.distributed._functional_collectives as funcol
import torch.nn as nn
from torch.distributed._tensor import DeviceMesh, DTensor
from torch.distributed._tensor.placement_types import Shard
from torch.distributed.tensor.debug import CommDebugMode
from torch.testing._internal.common_distributed import requires_nccl
from torch.testing._internal.common_utils import run_tests, TestCase
from torch.testing._internal.distributed._tensor.common_dtensor import MLPModule
from torch.testing._internal.distributed.fake_pg import FakeStore
c10d_functional = torch.ops.c10d_functional
c10d_ops = torch.ops.c10d
class WrapperModel(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tensor/debug/test_comm_mode.py
|
test_comm_mode_coalesced
|
def test_comm_mode_coalesced(self):
world_pg = self.world_pg
class WrapperModelCoalesced(nn.Module):
def __init__(self, device):
super().__init__()
self.model = MLPModule(device=device)
def forward(self, x):
x = funcol.all_gather_tensor(x, 0, world_pg)
x = funcol.reduce_scatter_tensor(x, "sum", 0, world_pg)
out = self.model(x)
return funcol.all_reduce_coalesced([out], "sum", world_pg)
model = WrapperModelCoalesced(self.device_type)
comm_mode = CommDebugMode()
with comm_mode:
model(torch.randn(20, 10, device=self.device_type))
comm_counts = comm_mode.get_comm_counts()
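        # expect one all_gather, one reduce_scatter, and one coalesced all_reduce from the forward above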
self.assertEqual(comm_mode.get_total_counts(), 3)
self.assertEqual(comm_counts[c10d_functional.all_reduce_coalesced], 1)
self.assertEqual(comm_counts[c10d_functional.all_gather_into_tensor], 1)
self.assertEqual(comm_counts[c10d_functional.reduce_scatter_tensor], 1)
|
import torch
import torch.distributed as dist
import torch.distributed._functional_collectives as funcol
import torch.nn as nn
from torch.distributed._tensor import DeviceMesh, DTensor
from torch.distributed._tensor.placement_types import Shard
from torch.distributed.tensor.debug import CommDebugMode
from torch.testing._internal.common_distributed import requires_nccl
from torch.testing._internal.common_utils import run_tests, TestCase
from torch.testing._internal.distributed._tensor.common_dtensor import MLPModule
from torch.testing._internal.distributed.fake_pg import FakeStore
c10d_functional = torch.ops.c10d_functional
c10d_ops = torch.ops.c10d
class TestCommMode(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tensor/debug/test_comm_mode.py
|
__init__
|
def __init__(self, device):
super().__init__()
self.model = MLPModule(device=device)
|
import torch
import torch.distributed as dist
import torch.distributed._functional_collectives as funcol
import torch.nn as nn
from torch.distributed._tensor import DeviceMesh, DTensor
from torch.distributed._tensor.placement_types import Shard
from torch.distributed.tensor.debug import CommDebugMode
from torch.testing._internal.common_distributed import requires_nccl
from torch.testing._internal.common_utils import run_tests, TestCase
from torch.testing._internal.distributed._tensor.common_dtensor import MLPModule
from torch.testing._internal.distributed.fake_pg import FakeStore
c10d_functional = torch.ops.c10d_functional
c10d_ops = torch.ops.c10d
class WrapperModel(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tensor/debug/test_comm_mode.py
|
forward
|
def forward(self, x):
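        # issue one all_gather, one reduce_scatter, and one all_reduce so CommDebugMode
        # has a known set of collectives to count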
x = funcol.all_gather_tensor(x, 0, world_pg)
x = funcol.reduce_scatter_tensor(x, "sum", 0, world_pg)
out = self.model(x)
return funcol.all_reduce(out, "sum", world_pg)
|
import torch
import torch.distributed as dist
import torch.distributed._functional_collectives as funcol
import torch.nn as nn
from torch.distributed._tensor import DeviceMesh, DTensor
from torch.distributed._tensor.placement_types import Shard
from torch.distributed.tensor.debug import CommDebugMode
from torch.testing._internal.common_distributed import requires_nccl
from torch.testing._internal.common_utils import run_tests, TestCase
from torch.testing._internal.distributed._tensor.common_dtensor import MLPModule
from torch.testing._internal.distributed.fake_pg import FakeStore
c10d_functional = torch.ops.c10d_functional
c10d_ops = torch.ops.c10d
class WrapperModel(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tensor/debug/test_comm_mode.py
|
f
|
def f(x, y):
return torch.mm(x, y)
comm_mode = CommDebugMode()
x = torch.randn(4, 8, requires_grad=True)
y = torch.randn(4, 32, requires_grad=True)
x_dtensor = DTensor.from_local(x, mesh, [Shard(0)], run_check=False)
y_dtensor = DTensor.from_local(y, mesh, [Shard(0)], run_check=False)
with comm_mode:
f(x_dtensor, y_dtensor)
comm_counts = comm_mode.get_comm_counts()
self.assertEqual(comm_mode.get_total_counts(), 1)
self.assertEqual(comm_counts[c10d_functional.all_reduce], 0)
self.assertEqual(comm_counts[c10d_functional.all_gather_into_tensor], 1)
self.assertEqual(comm_counts[c10d_functional.reduce_scatter_tensor], 0)
|
import torch
import torch.distributed as dist
import torch.distributed._functional_collectives as funcol
import torch.nn as nn
from torch.distributed._tensor import DeviceMesh, DTensor
from torch.distributed._tensor.placement_types import Shard
from torch.distributed.tensor.debug import CommDebugMode
from torch.testing._internal.common_distributed import requires_nccl
from torch.testing._internal.common_utils import run_tests, TestCase
from torch.testing._internal.distributed._tensor.common_dtensor import MLPModule
from torch.testing._internal.distributed.fake_pg import FakeStore
c10d_functional = torch.ops.c10d_functional
c10d_ops = torch.ops.c10d
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tensor/debug/test_comm_mode.py
|
test_comm_mode_with_c10d
|
def test_comm_mode_with_c10d(self):
if not torch.cuda.is_available():
return
world_pg = self.world_pg
inp = torch.rand(2, 8, 16).cuda()
all_gather_out = inp.new_empty(self.world_size * 2, 8, 16)
comm_mode = CommDebugMode()
# tests c10d all_reduce tracing
with comm_mode:
dist.all_reduce(inp)
self.checksAssert(comm_mode, c10d_ops.allreduce_, 1, 1)
# tests c10d all_gather_into_tensor tracing
with comm_mode:
dist.all_gather_into_tensor(all_gather_out, inp)
self.checksAssert(comm_mode, c10d_ops._allgather_base_, 1, 1)
# tests c10d reduce_scatter tracing
with comm_mode:
dist.reduce_scatter_tensor(inp, all_gather_out)
self.checksAssert(comm_mode, c10d_ops._reduce_scatter_base_, 1, 1)
# tests c10d broadcast tracing
with comm_mode:
dist.broadcast(inp, 0)
self.checksAssert(comm_mode, c10d_ops.broadcast_, 1, 1)
# tests c10d gather tracing
with comm_mode:
dist.gather(inp, None, 0)
self.checksAssert(comm_mode, c10d_ops.gather_, 1, 1)
# tests c10d reduce tracing
with comm_mode:
dist.reduce(inp, 0)
self.checksAssert(comm_mode, c10d_ops.reduce_, 1, 1)
# tests c10d scatter tracing
with comm_mode:
dist.scatter(inp, None, 0)
self.checksAssert(comm_mode, c10d_ops.scatter_, 1, 1)
# tests c10d all_gather tracing
output_list = []
with comm_mode:
dist.all_gather(output_list, inp, None)
self.checksAssert(comm_mode, c10d_ops.allgather_, 1, 1)
# tests c10d allgather_coalesced_ tracing
output_list = []
with comm_mode:
dist.all_gather_coalesced(output_list, [inp], None)
self.checksAssert(comm_mode, c10d_ops.allgather_coalesced_, 1, 1)
# tests c10d allgather_into_tensor_coalesced_ tracing
with comm_mode, dist._coalescing_manager():
dist.all_gather_into_tensor(all_gather_out, inp)
self.checksAssert(comm_mode, c10d_ops.allgather_into_tensor_coalesced_, 1, 1)
# tests c10d allreduce_coalesced
with comm_mode:
dist.all_reduce_coalesced(inp)
self.checksAssert(comm_mode, c10d_ops.allreduce_coalesced_, 1, 1)
# tests c10d reduce_scatter_
with comm_mode:
dist.reduce_scatter(all_gather_out, [inp])
self.checksAssert(comm_mode, c10d_ops.reduce_scatter_, 1, 1)
# tests c10d reduce_scatter_tensor_coalesced
with comm_mode as A, dist._coalescing_manager() as B:
dist.reduce_scatter_tensor(all_gather_out, inp)
self.checksAssert(comm_mode, c10d_ops.reduce_scatter_tensor_coalesced_, 1, 1)
# tests c10d alltoall_
with comm_mode:
dist.all_to_all([inp], [inp])
self.checksAssert(comm_mode, c10d_ops.alltoall_, 1, 1)
# tests c10d alltoall_base_
with comm_mode:
dist.all_to_all_single(inp, inp)
self.checksAssert(comm_mode, c10d_ops.alltoall_base_, 1, 1)
|
import torch
import torch.distributed as dist
import torch.distributed._functional_collectives as funcol
import torch.nn as nn
from torch.distributed._tensor import DeviceMesh, DTensor
from torch.distributed._tensor.placement_types import Shard
from torch.distributed.tensor.debug import CommDebugMode
from torch.testing._internal.common_distributed import requires_nccl
from torch.testing._internal.common_utils import run_tests, TestCase
from torch.testing._internal.distributed._tensor.common_dtensor import MLPModule
from torch.testing._internal.distributed.fake_pg import FakeStore
c10d_functional = torch.ops.c10d_functional
c10d_ops = torch.ops.c10d
class TestCommMode(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tensor/debug/test_comm_mode_features.py
|
check_same_set_of_keys
|
def check_same_set_of_keys(self, dict1, dict2):
"""
        Used to ensure that the comm_mode parameter/sharding dictionaries contain the same
        set of keys as the ground truth.
"""
dict1_keys = []
dict2_keys = []
for key in dict1:
for nested_key in dict1[key]:
dict1_keys.append((key, nested_key))
for key in dict2:
for nested_key in dict2[key]:
dict2_keys.append((key, nested_key))
self.assertEqual(len(dict1_keys), len(dict2_keys))
for i in range(len(dict1_keys)):
self.assertEqual(dict1_keys[i], dict2_keys[i])
# generates the ground truth parameter and sharding info
|
from typing import Any, Dict
import torch
from torch.distributed._tensor import DeviceMesh
from torch.distributed._tensor.api import distribute_tensor, DTensor
from torch.distributed.tensor.debug import CommDebugMode
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
MLPModule,
MLPStacked,
ModelArgs,
NUM_DEVICES,
skip_unless_torch_gpu,
Transformer,
with_comms,
)
c10d_functional = torch.ops.c10d_functional
class TestCommModeFeatures(DTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tensor/experimental/test_local_map.py
|
mm_allreduce_forward_decorated
|
def mm_allreduce_forward_decorated(device_mesh, A, B):
partial_sum_tensor = torch.mm(A, B)
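    # torch.mm runs on each rank's local tensors; the all_reduce below sums the
    # per-rank partial products into the full result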
return funcol.all_reduce(partial_sum_tensor, "sum", device_mesh).wait()
|
from functools import partial
import torch
import torch.distributed._functional_collectives as funcol
from torch.distributed._tensor import (
distribute_tensor,
DTensor,
init_device_mesh,
Replicate,
Shard,
)
from torch.distributed._tensor.experimental import local_map
from torch.distributed.tensor.debug import CommDebugMode
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
with_comms,
)
funcol_py = torch.ops.c10d_functional
row_wise = [Shard(0)] # row-wise sharding placements on 1-d mesh
col_wise = [Shard(1)] # col-wise sharding placements on 1-d mesh
replicate = [Replicate()] # replicate placements on 1-d mesh
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tensor/experimental/test_local_map.py
|
mul_forward
|
def mul_forward(X, scalar): # no device mesh needed since we don't do collective
return torch.mul(X, scalar)
class TestLocalMap(DTensorTestBase):
@property
def world_size(self):
return 2
# simple correctness check
@with_comms
def test_local_map_correctness(self):
device_mesh = init_device_mesh(
device_type=self.device_type, mesh_shape=(self.world_size,)
)
comm_mode = CommDebugMode()
# Y = X @ W
X = torch.randn(16, 8, device=self.device_type, requires_grad=False)
W = torch.randn(8, 12, device=self.device_type, requires_grad=False)
Y = torch.mm(X, W)
X_dt = distribute_tensor(
X, device_mesh, col_wise
) # col-wisely sharded X tensor
W_dt = distribute_tensor(
W, device_mesh, row_wise
) # row-wisely sharded W tensor
# Test 1: use the function returned from calling local_map
        # get the function wrapped with DTensor/Tensor conversion
        # mm_allreduce_forward is a function that applies to Tensors with a manual collective.
# local_mm_allreduce_forward is the function that does the same but applies to
# DTensors' `_local_tensor`.
local_mm_allreduce_forward = local_map(
mm_allreduce_forward,
out_placements=replicate,
in_placements=(None, col_wise, row_wise),
device_mesh=device_mesh,
)
with comm_mode:
Y_dt = local_mm_allreduce_forward(device_mesh, X_dt, W_dt)
# output redistribution to Replicate
self.assertEqual(comm_mode.get_total_counts(), 1)
# check output placements
for placement in Y_dt.placements:
self.assertTrue(placement.is_replicate())
# check output value
self.assertEqual(Y_dt.to_local(), Y)
# Test 2: use the local_map decorator
with comm_mode:
Y_dt = mm_allreduce_forward_decorated(device_mesh, X_dt, W_dt)
# output redistribution to Replicate
self.assertEqual(comm_mode.get_total_counts(), 1)
# check output placements
for placement in Y_dt.placements:
self.assertTrue(placement.is_replicate())
# check output value
self.assertEqual(Y_dt.to_local(), Y)
# check for `out_placements`
@with_comms
def test_local_map_out_placements(self):
# Test 1: wrap out into DTensor w/ `out_placements`
device_mesh = init_device_mesh(
device_type=self.device_type, mesh_shape=(self.world_size,)
)
comm_mode = CommDebugMode()
# X.equal(Y)
X = torch.randn(8, 8, device=self.device_type, requires_grad=False)
Y = torch.randn(8, 8, device=self.device_type, requires_grad=False)
X_dt = distribute_tensor(X, device_mesh, row_wise)
Y_dt = distribute_tensor(Y, device_mesh, row_wise)
local_equal_allgather_forward = local_map(
equal_allgather_forward,
out_placements=None,
)
with comm_mode:
equal_dt = local_equal_allgather_forward(device_mesh, X_dt, Y_dt) # a bool
self.assertEqual(comm_mode.get_total_counts(), 1)
self.assertTrue(not equal_dt)
self.assertTrue(not (X.equal(Y)))
# Test 2: directly return out if no argument is DTensor
# matmul in DDP
X = torch.randn(
4 // self.world_size, 4, device=self.device_type, requires_grad=False
)
W = torch.randn(4, 4, device=self.device_type, requires_grad=False)
local_mm_all_gather_forward = local_map(
mm_all_gather_forward,
out_placements=row_wise,
in_placements=(None, row_wise, replicate),
)
with comm_mode:
Y = local_mm_all_gather_forward(device_mesh, X, W)
self.assertEqual(comm_mode.get_total_counts(), 1)
self.assertEqual(
comm_mode.get_comm_counts()[funcol_py.all_gather_into_tensor], 1
)
X_replicate = funcol.all_gather_tensor(X, 0, device_mesh).wait()
Y_replicate = torch.mm(X_replicate, W)
self.assertEqual(Y, Y_replicate) # Y is a torch.Tensor
# check for `in_placements` handling
@with_comms
def test_local_map_in_placements(self):
device_mesh = init_device_mesh(
device_type=self.device_type, mesh_shape=(self.world_size,)
)
comm_mode = CommDebugMode()
# Y = X @ W
X = torch.randn(16, 8, device=self.device_type, requires_grad=False)
W = torch.randn(8, 12, device=self.device_type, requires_grad=False)
Y = torch.mm(X, W)
X_dt = distribute_tensor(
X, device_mesh, row_wise
) # row-wisely sharded X tensor
W_dt = distribute_tensor(W, device_mesh, replicate) # replicate W tensor
# Test 1: explicitly pass `in_placements`
local_mm_forward = local_map(
mm_forward,
out_placements=row_wise,
in_placements=(row_wise, replicate),
device_mesh=device_mesh,
)
with comm_mode:
Y_dt = local_mm_forward(X_dt, W_dt)
# no communication should occur in this case
self.assertEqual(comm_mode.get_total_counts(), 0)
for placement in Y_dt.placements:
self.assertTrue(placement.is_shard(dim=0))
self.assertEqual(Y_dt.full_tensor(), Y)
# Test 2: `in_placements=None`
local_mm_forward = local_map(
mm_forward,
out_placements=row_wise,
device_mesh=device_mesh,
)
with comm_mode:
Y_dt = local_mm_forward(X_dt, W_dt)
self.assertEqual(comm_mode.get_total_counts(), 0)
for placement in Y_dt.placements:
self.assertTrue(placement.is_shard(dim=0))
self.assertEqual(Y_dt.full_tensor(), Y)
# Test 3: `None` placements for non-Tensor input argument
# Y = X * 2.0
local_mul_forward = local_map(
mul_forward,
in_placements=(row_wise, None),
out_placements=row_wise,
device_mesh=device_mesh,
)
Y = torch.mul(X, 2.0)
with comm_mode:
Y_dt = local_mul_forward(X_dt, 2.0)
self.assertEqual(comm_mode.get_total_counts(), 0)
for placement in Y_dt.placements:
self.assertTrue(placement.is_shard(dim=0))
self.assertEqual(Y_dt.full_tensor(), Y)
# Test 4: `None` placements for Tensor input argument
local_mm_forward = local_map(
mm_forward,
out_placements=None,
in_placements=(None, None),
device_mesh=device_mesh,
)
with comm_mode:
Y_dt_local = local_mm_forward(X_dt.to_local(), W_dt.to_local())
self.assertEqual(comm_mode.get_total_counts(), 0)
self.assertEqual(
DTensor.from_local(Y_dt_local, device_mesh, row_wise).full_tensor(),
torch.mm(X, W),
)
# Test 5: Some placements for Tensor input argument
local_mm_forward = local_map(
mm_forward,
out_placements=None,
in_placements=(replicate, row_wise),
device_mesh=device_mesh,
)
with comm_mode:
Y_dt_local = local_mm_forward(X_dt.to_local(), W_dt.to_local())
self.assertEqual(comm_mode.get_total_counts(), 0)
self.assertEqual(
DTensor.from_local(Y_dt_local, device_mesh, row_wise).full_tensor(),
torch.mm(X, W),
)
# Test 6: expect error - `None` placements for DTensor input argument
local_mm_forward = local_map(
mm_forward,
out_placements=row_wise,
in_placements=(row_wise, None),
device_mesh=device_mesh,
)
with self.assertRaisesRegex(AssertionError, "expects placements"):
Y_dt = local_mm_forward(X_dt, W_dt)
# check for `redistribute_inputs` handling
@with_comms
def test_local_map_redistribute(self):
device_mesh = init_device_mesh(
device_type=self.device_type, mesh_shape=(self.world_size,)
)
comm_mode = CommDebugMode()
# Y = X @ W
X = torch.randn(16, 8, device=self.device_type, requires_grad=False)
W = torch.randn(8, 12, device=self.device_type, requires_grad=False)
Y = torch.mm(X, W)
X_dt = distribute_tensor(
X, device_mesh, row_wise
) # row-wisely sharded X tensor which will be redistributed
W_dt = distribute_tensor(
W, device_mesh, col_wise
) # col-wisely sharded W tensor which will be redistributed
# Test 1: allow input redistribution
local_mm_allreduce_forward = local_map(
mm_allreduce_forward,
out_placements=replicate,
in_placements=(None, col_wise, row_wise),
device_mesh=device_mesh,
redistribute_inputs=True,
)
with comm_mode:
Y_dt = local_mm_allreduce_forward(device_mesh, X_dt, W_dt)
# 2 for input redistribution and 1 for output
self.assertEqual(comm_mode.get_total_counts(), 3)
for placement in Y_dt.placements:
self.assertTrue(placement.is_replicate())
self.assertEqual(Y_dt.to_local(), Y)
# Test 2: no input redistribution is allowed
local_mm_allreduce_forward = local_map(
mm_allreduce_forward,
out_placements=replicate,
in_placements=(None, col_wise, row_wise),
device_mesh=device_mesh,
redistribute_inputs=False,
)
with self.assertRaisesRegex(ValueError, "set redistribute_inputs=True"):
Y_dt = local_mm_allreduce_forward(device_mesh, X_dt, W_dt)
if __name__ == "__main__":
run_tests()
|
from functools import partial
import torch
import torch.distributed._functional_collectives as funcol
from torch.distributed._tensor import (
distribute_tensor,
DTensor,
init_device_mesh,
Replicate,
Shard,
)
from torch.distributed._tensor.experimental import local_map
from torch.distributed.tensor.debug import CommDebugMode
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
with_comms,
)
funcol_py = torch.ops.c10d_functional
row_wise = [Shard(0)] # row-wise sharding placements on 1-d mesh
col_wise = [Shard(1)] # col-wise sharding placements on 1-d mesh
replicate = [Replicate()] # replicate placements on 1-d mesh
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tensor/experimental/test_local_map.py
|
test_local_map_redistribute
|
def test_local_map_redistribute(self):
device_mesh = init_device_mesh(
device_type=self.device_type, mesh_shape=(self.world_size,)
)
comm_mode = CommDebugMode()
# Y = X @ W
X = torch.randn(16, 8, device=self.device_type, requires_grad=False)
W = torch.randn(8, 12, device=self.device_type, requires_grad=False)
Y = torch.mm(X, W)
X_dt = distribute_tensor(
X, device_mesh, row_wise
) # row-wisely sharded X tensor which will be redistributed
W_dt = distribute_tensor(
W, device_mesh, col_wise
) # col-wisely sharded W tensor which will be redistributed
# Test 1: allow input redistribution
local_mm_allreduce_forward = local_map(
mm_allreduce_forward,
out_placements=replicate,
in_placements=(None, col_wise, row_wise),
device_mesh=device_mesh,
redistribute_inputs=True,
)
with comm_mode:
Y_dt = local_mm_allreduce_forward(device_mesh, X_dt, W_dt)
# 2 for input redistribution and 1 for output
self.assertEqual(comm_mode.get_total_counts(), 3)
for placement in Y_dt.placements:
self.assertTrue(placement.is_replicate())
self.assertEqual(Y_dt.to_local(), Y)
# Test 2: no input redistribution is allowed
local_mm_allreduce_forward = local_map(
mm_allreduce_forward,
out_placements=replicate,
in_placements=(None, col_wise, row_wise),
device_mesh=device_mesh,
redistribute_inputs=False,
)
with self.assertRaisesRegex(ValueError, "set redistribute_inputs=True"):
Y_dt = local_mm_allreduce_forward(device_mesh, X_dt, W_dt)
|
from functools import partial
import torch
import torch.distributed._functional_collectives as funcol
from torch.distributed._tensor import (
distribute_tensor,
DTensor,
init_device_mesh,
Replicate,
Shard,
)
from torch.distributed._tensor.experimental import local_map
from torch.distributed.tensor.debug import CommDebugMode
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
with_comms,
)
funcol_py = torch.ops.c10d_functional
row_wise = [Shard(0)] # row-wise sharding placements on 1-d mesh
col_wise = [Shard(1)] # col-wise sharding placements on 1-d mesh
replicate = [Replicate()] # replicate placements on 1-d mesh
class TestLocalMap(DTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tensor/experimental/test_register_sharding.py
|
test_argmax
|
def test_argmax(self):
@register_sharding(aten.argmax.default)
def custom_argmax_sharding(x, dim, keepdim):
acceptable_shardings = []
all_replicate = ([Replicate()], [Replicate(), None, None])
acceptable_shardings.append(all_replicate)
if keepdim:
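                # with keepdim=True the output keeps the input's rank, so Shard(sharding_dim)
                # is valid for both input and output; the reduction over `dim` stays local to each shard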
for sharding_dim in range(x.ndim):
if sharding_dim != dim:
all_sharded = (
[Shard(sharding_dim)],
[Shard(sharding_dim), None, None],
)
acceptable_shardings.append(all_sharded)
return acceptable_shardings
# check if the RuntimeSchemaInfo is derived correctly
# when the first int arg is optional
schema_info = DTensor._op_dispatcher.sharding_propagator.op_to_schema_info[
aten.argmax.default
]
self.assertEqual(schema_info.static_argnum, 1)
device_mesh = self.build_device_mesh()
x = torch.rand(8, 12, device=self.device_type)
dist_x = distribute_tensor(x, device_mesh, [Shard(0)])
local_y = torch.argmax(x, dim=1, keepdim=True)
dist_y = torch.argmax(dist_x, dim=1, keepdim=True)
self.assertTrue(dist_y.placements[0].is_shard(dim=0))
self.assertEqual(dist_y.full_tensor(), local_y)
|
import itertools
import torch
from torch.distributed._tensor import distribute_tensor, DTensor, Replicate, Shard
from torch.distributed._tensor.experimental import register_sharding
from torch.distributed._tensor.placement_types import DTensorSpec
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
with_comms,
)
aten = torch.ops.aten
class TestRegisterSharding(DTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tensor/experimental/test_register_sharding.py
|
custom_argmax_sharding
|
def custom_argmax_sharding(x, dim, keepdim):
acceptable_shardings = []
all_replicate = ([Replicate()], [Replicate(), None, None])
acceptable_shardings.append(all_replicate)
if keepdim:
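                # with keepdim=True the output keeps the input's rank, so Shard(sharding_dim)
                # is valid for both input and output; the reduction over `dim` stays local to each shard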
for sharding_dim in range(x.ndim):
if sharding_dim != dim:
all_sharded = (
[Shard(sharding_dim)],
[Shard(sharding_dim), None, None],
)
acceptable_shardings.append(all_sharded)
return acceptable_shardings
# check if the RuntimeSchemaInfo is derived correctly
# when the first int arg is optional
schema_info = DTensor._op_dispatcher.sharding_propagator.op_to_schema_info[
aten.argmax.default
]
self.assertEqual(schema_info.static_argnum, 1)
device_mesh = self.build_device_mesh()
x = torch.rand(8, 12, device=self.device_type)
dist_x = distribute_tensor(x, device_mesh, [Shard(0)])
local_y = torch.argmax(x, dim=1, keepdim=True)
dist_y = torch.argmax(dist_x, dim=1, keepdim=True)
self.assertTrue(dist_y.placements[0].is_shard(dim=0))
self.assertEqual(dist_y.full_tensor(), local_y)
|
import itertools
import torch
from torch.distributed._tensor import distribute_tensor, DTensor, Replicate, Shard
from torch.distributed._tensor.experimental import register_sharding
from torch.distributed._tensor.placement_types import DTensorSpec
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
with_comms,
)
aten = torch.ops.aten
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tensor/debug/test_comm_mode_features.py
|
test_transformer_module_tracing
|
def test_transformer_module_tracing(self, is_seq_parallel=False):
"""
        Tests module-level tracing for a more complicated transformer module and
        ensures that the comm_module depth and tracing dictionaries are correctly reset.
"""
device_mesh = DeviceMesh(
self.device_type,
torch.arange(0, NUM_DEVICES),
)
inp_size = [8, 10]
torch.manual_seed(0)
inp = torch.rand(*inp_size, device=self.device_type)
model = MLPModule(self.device_type)
parallelize_plan = {
"net1": ColwiseParallel(),
"net2": RowwiseParallel(),
}
model = parallelize_module(model, device_mesh, parallelize_plan)
comm_mode = CommDebugMode()
with comm_mode:
self.assertEqual(
len(comm_mode.advanced_module_tracker.module_helper_dict), 1
)
self.assertEqual(
comm_mode.comm_module_counts,
{"Global": {"forward": {}, "backward": {}}},
)
output_tp = model(inp)
model_args = ModelArgs(dropout_p=0.0)
model2 = Transformer(model_args).to(device=self.device_type)
model2 = Transformer.parallelize(model2, device_mesh, is_seq_parallel)
inp_size = [8, 8]
inp = torch.randint(model_args.vocab_size, inp_size, device=self.device_type)
inp = distribute_tensor(inp, device_mesh=device_mesh)
comm_mode = CommDebugMode()
with comm_mode:
output = model2(inp)
        # check that all collectives were correctly traced at the module level
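        # 6 all_reduces come from tok_embeddings, pos_embeddings, and attention.wo + feed_forward.w2
        # in each of the two layers; the single all_gather comes from the output projection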
self.assertEqual(
comm_mode.comm_module_counts["Global"]["forward"][
c10d_functional.all_reduce
],
6,
)
self.assertEqual(
comm_mode.comm_module_counts["Global"]["forward"][
c10d_functional.all_gather_into_tensor
],
1,
)
self.assertEqual(
comm_mode.comm_module_counts["Transformer"]["forward"][
c10d_functional.all_reduce
],
6,
)
self.assertEqual(
comm_mode.comm_module_counts["Transformer"]["forward"][
c10d_functional.all_gather_into_tensor
],
1,
)
self.assertEqual(
comm_mode.comm_module_counts["Transformer.tok_embeddings"]["forward"][
c10d_functional.all_reduce
],
1,
)
self.assertEqual(
comm_mode.comm_module_counts["Transformer.pos_embeddings"]["forward"][
c10d_functional.all_reduce
],
1,
)
self.assertEqual(
comm_mode.comm_module_counts["Transformer.layers.0"]["forward"][
c10d_functional.all_reduce
],
2,
)
self.assertEqual(
comm_mode.comm_module_counts["Transformer.layers.0.attention"]["forward"][
c10d_functional.all_reduce
],
1,
)
self.assertEqual(
comm_mode.comm_module_counts["Transformer.layers.0.attention.wo"][
"forward"
][c10d_functional.all_reduce],
1,
)
self.assertEqual(
comm_mode.comm_module_counts["Transformer.layers.0.feed_forward"][
"forward"
][c10d_functional.all_reduce],
1,
)
self.assertEqual(
comm_mode.comm_module_counts["Transformer.layers.0.feed_forward.w2"][
"forward"
][c10d_functional.all_reduce],
1,
)
self.assertEqual(
comm_mode.comm_module_counts["Transformer.layers.1"]["forward"][
c10d_functional.all_reduce
],
2,
)
self.assertEqual(
comm_mode.comm_module_counts["Transformer.layers.1.attention"]["forward"][
c10d_functional.all_reduce
],
1,
)
self.assertEqual(
comm_mode.comm_module_counts["Transformer.layers.1.attention.wo"][
"forward"
][c10d_functional.all_reduce],
1,
)
self.assertEqual(
comm_mode.comm_module_counts["Transformer.layers.1.feed_forward"][
"forward"
][c10d_functional.all_reduce],
1,
)
self.assertEqual(
comm_mode.comm_module_counts["Transformer.layers.1.feed_forward.w2"][
"forward"
][c10d_functional.all_reduce],
1,
)
self.assertEqual(
comm_mode.comm_module_counts["Transformer.output"]["forward"][
c10d_functional.all_gather_into_tensor
],
1,
)
|
from typing import Any, Dict
import torch
from torch.distributed._tensor import DeviceMesh
from torch.distributed._tensor.api import distribute_tensor, DTensor
from torch.distributed.tensor.debug import CommDebugMode
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
MLPModule,
MLPStacked,
ModelArgs,
NUM_DEVICES,
skip_unless_torch_gpu,
Transformer,
with_comms,
)
c10d_functional = torch.ops.c10d_functional
class TestCommModeFeatures(DTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tensor/debug/test_op_coverage.py
|
forward
|
def forward(self, x):
return torch.sigmoid(self.net2(self.relu(self.net1(x))))
|
import torch
import torch.nn as nn
from torch.distributed.tensor.debug._op_coverage import get_inductor_decomp_graphs
from torch.testing._internal.common_utils import run_tests, TestCase
class SimpleMLP(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tensor/debug/test_op_coverage.py
|
test_trace_with_inductor_decomp
|
def test_trace_with_inductor_decomp(self):
model = SimpleMLP()
args = (torch.randn(8, 50),)
kwargs = {}
graphs = get_inductor_decomp_graphs(model, args, kwargs)
assert len(graphs) == 2, "Expect fwd + bwd graphs"
self.assertIsInstance(graphs[0], torch.fx.GraphModule)
self.assertIsInstance(graphs[1], torch.fx.GraphModule)
|
import torch
import torch.nn as nn
from torch.distributed.tensor.debug._op_coverage import get_inductor_decomp_graphs
from torch.testing._internal.common_utils import run_tests, TestCase
class TestOpCoverage(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|