column           type            values / length range
library          stringclasses   1 value
test_file        stringclasses   785 values
test_function    stringlengths   1 to 295
before           stringlengths   0 to 448k
after            stringlengths   0 to 487k
context_before   stringclasses   947 values
context_after    stringlengths   0 to 16.3k
commit_before    stringclasses   1 value
commit_after     stringclasses   1 value
change_type      stringclasses   3 values
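The table above is the flattened column summary from the dataset viewer: stringclasses columns report how many distinct values occur, and stringlengths columns report the minimum and maximum field length. Below is a minimal sketch of how a dump with this schema could be loaded and summarized, assuming it is published as a Hugging Face dataset and the standard `datasets` API is available; the repo id and split name are placeholders, not the real dataset identifiers.

# Minimal sketch for loading and inspecting a dump with the schema above.
# Assumptions: the data lives in a Hugging Face dataset repo; the repo id
# "org/pytorch-test-changes" and the "train" split are hypothetical.
from collections import Counter

from datasets import load_dataset

ds = load_dataset("org/pytorch-test-changes", split="train")  # placeholder repo id

# change_type is listed with 3 distinct values; the rows shown in this dump
# use "added", "deleted", and "modified".
print(Counter(ds["change_type"]))

# Every row shares the same commit pair (commit_before and commit_after each
# have a single value), so one record is enough to see the field layout.
row = ds[0]
print(row["test_file"], row["test_function"], row["change_type"])
print(row["before"][:200])  # flattened source before the change (may be empty)
print(row["after"][:200])   # flattened source after the change (may be empty)

Since commit_before and commit_after each hold a single value, every row in this dump comes from the same commit pair (c263bd43e8e8502d4726643bc6fd046f0130ac0e to 32f585d9346e316e554c8d9bf7548af9f62141fc), so filtering on those columns is rarely useful; test_file, test_function, and change_type are the natural keys for slicing the records that follow.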
torch
test/distributed/_tensor/experimental/test_local_map.py
equal_allgather_forward
def equal_allgather_forward(device_mesh, X, Y): eq = torch.tensor([torch.equal(X, Y)], device=X.device) eq_gather = funcol.all_gather_tensor(eq, 0, device_mesh) return torch.all(eq_gather).item()
from functools import partial import torch import torch.distributed._functional_collectives as funcol from torch.distributed._tensor import ( distribute_tensor, DTensor, init_device_mesh, Replicate, Shard, ) from torch.distributed._tensor.experimental import local_map from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, with_comms, ) funcol_py = torch.ops.c10d_functional row_wise = [Shard(0)] # row-wise sharding placements on 1-d mesh col_wise = [Shard(1)] # col-wise sharding placements on 1-d mesh replicate = [Replicate()] # replicate placements on 1-d mesh
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/experimental/test_local_map.py
mm_all_gather_forward
def mm_all_gather_forward(device_mesh, A, B): local_mm_result = torch.mm(A, B) return funcol.all_gather_tensor(local_mm_result, 0, device_mesh).wait()
from functools import partial import torch import torch.distributed._functional_collectives as funcol from torch.distributed._tensor import ( distribute_tensor, DTensor, init_device_mesh, Replicate, Shard, ) from torch.distributed._tensor.experimental import local_map from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, with_comms, ) funcol_py = torch.ops.c10d_functional row_wise = [Shard(0)] # row-wise sharding placements on 1-d mesh col_wise = [Shard(1)] # col-wise sharding placements on 1-d mesh replicate = [Replicate()] # replicate placements on 1-d mesh
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/experimental/test_local_map.py
mm_forward
def mm_forward(A, B): # no device mesh needed since we don't do collective return torch.mm(A, B)
from functools import partial import torch import torch.distributed._functional_collectives as funcol from torch.distributed._tensor import ( distribute_tensor, DTensor, init_device_mesh, Replicate, Shard, ) from torch.distributed._tensor.experimental import local_map from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, with_comms, ) funcol_py = torch.ops.c10d_functional row_wise = [Shard(0)] # row-wise sharding placements on 1-d mesh col_wise = [Shard(1)] # col-wise sharding placements on 1-d mesh replicate = [Replicate()] # replicate placements on 1-d mesh
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/debug/test_comm_mode.py
tearDown
def tearDown(self): super().tearDown() dist.destroy_process_group()
import torch import torch.distributed as dist import torch.distributed._functional_collectives as funcol import torch.nn as nn from torch.distributed._tensor import DeviceMesh, DTensor from torch.distributed._tensor.placement_types import Shard from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_distributed import requires_nccl from torch.testing._internal.common_utils import run_tests, TestCase from torch.testing._internal.distributed._tensor.common_dtensor import MLPModule from torch.testing._internal.distributed.fake_pg import FakeStore c10d_functional = torch.ops.c10d_functional c10d_ops = torch.ops.c10d class TestCommMode(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/debug/test_comm_mode.py
setUp
def setUp(self): super().setUp() self.world_size = 2 store = FakeStore() dist.init_process_group( backend="fake", rank=1, world_size=self.world_size, store=store ) self.device_type = "cuda" if torch.cuda.is_available() else "cpu" self.world_pg = dist.distributed_c10d._get_default_group()
import torch import torch.distributed as dist import torch.distributed._functional_collectives as funcol import torch.nn as nn from torch.distributed._tensor import DeviceMesh, DTensor from torch.distributed._tensor.placement_types import Shard from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_distributed import requires_nccl from torch.testing._internal.common_utils import run_tests, TestCase from torch.testing._internal.distributed._tensor.common_dtensor import MLPModule from torch.testing._internal.distributed.fake_pg import FakeStore c10d_functional = torch.ops.c10d_functional c10d_ops = torch.ops.c10d class TestCommMode(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/debug/test_comm_mode.py
checksAssert
def checksAssert(self, comm_mode, key, expected_value, expected_total_value): comm_counts = comm_mode.get_comm_counts() self.assertEqual(comm_mode.get_total_counts(), expected_total_value) self.assertEqual(comm_counts[key], expected_value) return
import torch import torch.distributed as dist import torch.distributed._functional_collectives as funcol import torch.nn as nn from torch.distributed._tensor import DeviceMesh, DTensor from torch.distributed._tensor.placement_types import Shard from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_distributed import requires_nccl from torch.testing._internal.common_utils import run_tests, TestCase from torch.testing._internal.distributed._tensor.common_dtensor import MLPModule from torch.testing._internal.distributed.fake_pg import FakeStore c10d_functional = torch.ops.c10d_functional c10d_ops = torch.ops.c10d class TestCommMode(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/debug/test_comm_mode.py
test_comm_mode
def test_comm_mode(self): world_pg = self.world_pg class WrapperModel(nn.Module): def __init__(self, device): super().__init__() self.model = MLPModule(device=device) def forward(self, x): x = funcol.all_gather_tensor(x, 0, world_pg) x = funcol.reduce_scatter_tensor(x, "sum", 0, world_pg) out = self.model(x) return funcol.all_reduce(out, "sum", world_pg) model = WrapperModel(self.device_type) comm_mode = CommDebugMode() with comm_mode: model(torch.randn(20, 10, device=self.device_type)) comm_counts = comm_mode.get_comm_counts() self.assertEqual(comm_mode.get_total_counts(), 3) self.assertEqual(comm_counts[c10d_functional.all_reduce], 1) self.assertEqual(comm_counts[c10d_functional.all_gather_into_tensor], 1) self.assertEqual(comm_counts[c10d_functional.reduce_scatter_tensor], 1)
import torch import torch.distributed as dist import torch.distributed._functional_collectives as funcol import torch.nn as nn from torch.distributed._tensor import DeviceMesh, DTensor from torch.distributed._tensor.placement_types import Shard from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_distributed import requires_nccl from torch.testing._internal.common_utils import run_tests, TestCase from torch.testing._internal.distributed._tensor.common_dtensor import MLPModule from torch.testing._internal.distributed.fake_pg import FakeStore c10d_functional = torch.ops.c10d_functional c10d_ops = torch.ops.c10d class TestCommMode(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_shard/sharded_tensor/test_sharded_tensor.py
test_sub_process_group_placement_validation
def test_sub_process_group_placement_validation(self): world_pg = dist.GroupMember.WORLD self.assertIsNotNone(world_pg) rank = dist.get_rank() sub_group_sz = 2 sub_pg_ranks = [r for r in range(4) if r % sub_group_sz == rank % sub_group_sz] sub_pg = dist.new_group( sub_pg_ranks, backend=dist.get_backend(world_pg), use_local_synchronization=True, ) dist.barrier(sub_pg) for r in sub_pg_ranks: _parse_and_validate_remote_device( sub_pg, _remote_device(f"rank:{r}/cuda:{r % sub_group_sz}") )
import copy import io import itertools import math import pickle import sys from typing import List import torch import torch.distributed as dist from torch.distributed import distributed_c10d, rpc from torch.distributed._shard import sharded_tensor from torch.distributed._shard.api import ( _collect_local_shard, _reshard_output, _shard_tensor, load_with_process_group, shard_parameter, ) from torch.distributed._shard.sharded_tensor import ( custom_sharded_op_impl, pre_load_state_dict_hook, Shard, ShardedTensor, ShardedTensorBase, ShardedTensorMetadata, state_dict_hook, ) from torch.distributed._shard.sharded_tensor.api import ( _create_tensor_from_params, TensorProperties, ) from torch.distributed._shard.sharded_tensor.utils import ( _parse_and_validate_remote_device, ) from torch.distributed._shard.sharding_spec import ( ChunkShardingSpec, EnumerableShardingSpec, ShardMetadata, ) from torch.distributed.remote_device import _remote_device from torch.testing._internal.common_distributed import ( requires_nccl, skip_if_lt_x_gpu, spawn_threads_and_init_comms, tp_transports, ) from torch.testing._internal.common_utils import ( run_tests, skip_but_pass_in_sandcastle_if, TEST_CUDA, TEST_WITH_DEV_DBG_ASAN, TestCase, ) from torch.testing._internal.distributed._shard.sharded_tensor import ( ShardedTensorTestBase, with_comms, ) from torch.testing._internal.distributed._shard.sharded_tensor._test_st_common import ( _chunk_sharding_specs_list_for_test, MyShardedModel1, ) from torch.distributed._shard.sharding_spec.api import custom_sharding_spec_op class TestShardedTensorSubGroupInit(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_shard/sharded_tensor/test_sharded_tensor.py
test_non_contiguous_local_shards
run_tests()
def test_non_contiguous_local_shards(self): st_metadata: ShardedTensorMetadata = ShardedTensorMetadata( shards_metadata=[ ShardMetadata( shard_offsets=[0, 0], shard_sizes=[2, 2], placement="rank:0/cpu" ), ShardMetadata( shard_offsets=[2, 0], shard_sizes=[2, 2], placement="rank:1/cpu" ), ], size=torch.Size([4, 2]), ) st_local_shards: List[Shard] = [] src = torch.randn(4, 2) for shard_metadata in st_metadata.shards_metadata: offsets = shard_metadata.shard_offsets sizes = shard_metadata.shard_sizes st_local_shards.append( Shard( tensor=src[ offsets[0] : offsets[0] + sizes[0], offsets[1] : offsets[1] + sizes[1], ], metadata=shard_metadata, ) ) ShardedTensorBase._init_from_local_shards_and_global_metadata( local_shards=st_local_shards, sharded_tensor_metadata=st_metadata, )
import copy import io import itertools import math import pickle import sys from typing import List import torch import torch.distributed as dist from torch.distributed import distributed_c10d, rpc from torch.distributed._shard import sharded_tensor from torch.distributed._shard.api import ( _collect_local_shard, _reshard_output, _shard_tensor, load_with_process_group, shard_parameter, ) from torch.distributed._shard.sharded_tensor import ( custom_sharded_op_impl, pre_load_state_dict_hook, Shard, ShardedTensor, ShardedTensorBase, ShardedTensorMetadata, state_dict_hook, ) from torch.distributed._shard.sharded_tensor.api import ( _create_tensor_from_params, TensorProperties, ) from torch.distributed._shard.sharded_tensor.utils import ( _parse_and_validate_remote_device, ) from torch.distributed._shard.sharding_spec import ( ChunkShardingSpec, EnumerableShardingSpec, ShardMetadata, ) from torch.distributed.remote_device import _remote_device from torch.testing._internal.common_distributed import ( requires_nccl, skip_if_lt_x_gpu, spawn_threads_and_init_comms, tp_transports, ) from torch.testing._internal.common_utils import ( run_tests, skip_but_pass_in_sandcastle_if, TEST_CUDA, TEST_WITH_DEV_DBG_ASAN, TestCase, ) from torch.testing._internal.distributed._shard.sharded_tensor import ( ShardedTensorTestBase, with_comms, ) from torch.testing._internal.distributed._shard.sharded_tensor._test_st_common import ( _chunk_sharding_specs_list_for_test, MyShardedModel1, ) from torch.distributed._shard.sharding_spec.api import custom_sharding_spec_op class TestCreateTensorNoProcessGroupMode(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_device_mesh.py
test_eligible_default_pg_for_mesh
def test_eligible_default_pg_for_mesh(self): mesh_tensor = torch.arange(self.world_size).reshape(2, -1) mesh = DeviceMesh(self.device_type, mesh_tensor)
import os import sys import torch from torch.distributed._tensor.device_mesh import DeviceMesh from torch.distributed._tensor.placement_types import Shard from torch.distributed.distributed_c10d import ( get_global_rank, get_world_size, is_initialized, new_group, ProcessGroup, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, with_comms, ) from torch.testing._internal.common_distributed import TEST_SKIPS class DeviceMeshTest(DTensorTestBase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/distributed/_tensor/test_device_mesh.py
test_ineligible_default_pg_for_mesh
def test_ineligible_default_pg_for_mesh(self): device_type, backend = _get_device_type_and_backend() # skip the test if not enough GPUs if backend == "nccl" and torch.cuda.device_count() < self.world_size: sys.exit(TEST_SKIPS[f"multi-gpu-{self.world_size}"].exit_code) _set_env_var(world_size=self.world_size, rank=self.rank) # missing ranks mesh_tensor = torch.arange(self.world_size - 2).reshape(2, -1) with self.assertRaisesRegex(RuntimeError, "DeviceMesh must include every process in WORLD"): mesh = DeviceMesh(device_type, mesh_tensor) # mesh ranks are not unique mesh_tensor = torch.arange(self.world_size).reshape(2, -1) mesh_tensor[0][1] = 2 with self.assertRaisesRegex(RuntimeError, "DeviceMesh cannot have duplicate values"): mesh = DeviceMesh(device_type, mesh_tensor) # mesh ranks don't start from 0 mesh_tensor = torch.arange(start=1, end=(self.world_size + 1)).reshape(2, -1) with self.assertRaisesRegex(RuntimeError, "DeviceMesh ranks must start from 0"): mesh = DeviceMesh(device_type, mesh_tensor) # mesh ranks don't increment correctly mesh_tensor = torch.arange(start=0, end=(2 * self.world_size), step=2).reshape(2, -1) with self.assertRaisesRegex(RuntimeError, "DeviceMesh should have all ranks of WORLD"): mesh = DeviceMesh(device_type, mesh_tensor)
import os import sys import torch from torch.distributed._tensor.device_mesh import DeviceMesh from torch.distributed._tensor.placement_types import Shard from torch.distributed.distributed_c10d import ( get_global_rank, get_world_size, is_initialized, new_group, ProcessGroup, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, with_comms, ) from torch.testing._internal.common_distributed import TEST_SKIPS class DeviceMeshTest(DTensorTestBase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/distributed/_tensor/test_device_mesh.py
test_device_mesh_dim_groups_error
def test_device_mesh_dim_groups_error(self): # construct a two dimension subgroups dim_groups = [] expected_ranks_by_dim = [[[0, 2], [1, 3]], [[0, 1], [2, 3]]] for dim_group_ranks in expected_ranks_by_dim: for subgroup_ranks in dim_group_ranks: subgroup = new_group(ranks=subgroup_ranks) if self.rank in subgroup_ranks: dim_groups.append(subgroup) if len(dim_groups) > 0: # dim_groups is not a list self.assertRaises( RuntimeError, DeviceMesh, self.device_type, [[0, 1], [2, 3]], dim_groups=dim_groups[0], ) # dim_groups is a list, but not a list of ProcessGroup self.assertRaises( RuntimeError, DeviceMesh, self.device_type, [[0, 1], [2, 3]], dim_groups=[dim_groups[0], "dummy"], ) # dim_groups has incorrect length self.assertRaises( RuntimeError, DeviceMesh, self.device_type, [[0, 1], [2, 3]], dim_groups=[dim_groups[0]], )
import os import sys import torch from torch.distributed._tensor.device_mesh import DeviceMesh from torch.distributed._tensor.placement_types import Shard from torch.distributed.distributed_c10d import ( get_global_rank, get_world_size, is_initialized, new_group, ProcessGroup, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, with_comms, ) from torch.testing._internal.common_distributed import TEST_SKIPS class DeviceMeshTest(DTensorTestBase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/distributed/_tensor/test_device_mesh.py
test_device_mesh_hash
def test_device_mesh_hash(self): mesh_tensor_2d = torch.arange(8).reshape(4, 2) mesh = DeviceMesh(self.device_type, mesh_tensor_2d) mesh2 = DeviceMesh(self.device_type, mesh_tensor_2d) self.assertNotEqual(hash(mesh), hash(mesh2)) mesh_tensor_3d = torch.arange(8).reshape(2, 2, 2) mesh3 = DeviceMesh(self.device_type, mesh_tensor_3d) self.assertNotEqual(hash(mesh), hash(mesh3)) self.assertNotEqual(hash(mesh2), hash(mesh3))
import os import sys import torch from torch.distributed._tensor.device_mesh import DeviceMesh from torch.distributed._tensor.placement_types import Shard from torch.distributed.distributed_c10d import ( get_global_rank, get_world_size, is_initialized, new_group, ProcessGroup, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, with_comms, ) from torch.testing._internal.common_distributed import TEST_SKIPS class DeviceMeshTestNDim(DTensorTestBase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/distributed/_shard/sharding_spec/test_sharding_spec.py
test_custom_sharding_spec_shard_tensor
def test_custom_sharding_spec_shard_tensor(self): """ Test custom spec can be invoked from the _shard_tensor callsite. """ ranks = [ "rank:0/cuda:0", "rank:1/cuda:1", "rank:2/cuda:2", "rank:3/cuda:3", ] grid_spec = GridShardingSpec( grid_size=2, placements=ranks ) with self.assertRaisesRegex(NotImplementedError, 'not implemented'): _shard_tensor(torch.randn(8, 8), grid_spec)
def test_custom_sharding_spec_shard_tensor(self): """Test custom spec can be invoked from the _shard_tensor callsite. """ ranks = [ "rank:0/cuda:0", "rank:1/cuda:1", "rank:2/cuda:2", "rank:3/cuda:3", ] grid_spec = GridShardingSpec(grid_size=2, placements=ranks) with self.assertRaisesRegex(NotImplementedError, "not implemented"): _shard_tensor(torch.randn(8, 8), grid_spec)
from typing import List, Union from dataclasses import dataclass import copy import torch from torch.testing._internal.common_utils import TestCase from torch.testing._internal.common_distributed import ( requires_nccl, skip_if_lt_x_gpu, ) from torch.distributed._shard import sharded_tensor, _shard_tensor from torch.distributed._shard.sharding_spec import ( ShardingSpec, ChunkShardingSpec, DevicePlacementSpec, EnumerableShardingSpec, ShardMetadata, _infer_sharding_spec_from_shards_metadata, ) from torch.distributed._shard.sharded_tensor import ( TensorProperties, ShardedTensor, ShardedTensorMetadata, ) from torch.distributed._shard.sharding_spec._internals import ( check_tensor, get_split_size, get_chunked_dim_size, get_chunk_sharding_params, validate_non_overlapping_shards_metadata, ) from torch.testing._internal.common_utils import ( run_tests, sandcastle_skip_if, ) from torch.testing._internal.distributed._shard.sharded_tensor._test_st_common import ( _chunk_sharding_specs_list_for_test, ) from torch.testing._internal.distributed._shard.sharded_tensor import ( ShardedTensorTestBase, with_comms, ) class TestCustomShardingSpec(ShardedTensorTestBase):
import copy from dataclasses import dataclass from typing import List, Union import torch from torch.distributed._shard import _shard_tensor, sharded_tensor from torch.distributed._shard.sharded_tensor import ( ShardedTensor, ShardedTensorMetadata, TensorProperties, ) from torch.distributed._shard.sharding_spec import ( _infer_sharding_spec_from_shards_metadata, ChunkShardingSpec, DevicePlacementSpec, EnumerableShardingSpec, ShardingSpec, ShardMetadata, ) from torch.distributed._shard.sharding_spec._internals import ( check_tensor, get_chunk_sharding_params, get_chunked_dim_size, get_split_size, validate_non_overlapping_shards_metadata, ) from torch.testing._internal.common_cuda import TEST_MULTIGPU from torch.testing._internal.common_distributed import requires_nccl, skip_if_lt_x_gpu from torch.testing._internal.common_utils import ( run_tests, skip_but_pass_in_sandcastle_if, TestCase, ) from torch.testing._internal.distributed._shard.sharded_tensor import ( ShardedTensorTestBase, with_comms, ) from torch.testing._internal.distributed._shard.sharded_tensor._test_st_common import ( _chunk_sharding_specs_list_for_test, ) class TestCustomShardingSpec(ShardedTensorTestBase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/_shard/test_partial_tensor.py
test_partial_tensor_reshard_errors
def test_partial_tensor_reshard_errors(self): enumerable_sharding_spec = EnumerableShardingSpec( [ ShardMetadata( shard_offsets=[0, 0], shard_sizes=[5, 5], placement="rank:0/cuda:0", ), ShardMetadata( shard_offsets=[5, 0], shard_sizes=[5, 5], placement="rank:1/cuda:1", ), ] ) with self.assertRaisesRegex( NotImplementedError, "Only ChunkShardingSpec supported for reshard." ): self._run_partial_tensor_n_reshard( enumerable_sharding_spec, [13, 21], 4, dist.ReduceOp.SUM ) self._run_partial_tensor_n_reshard( enumerable_sharding_spec, [12, 22], 4, dist.ReduceOp.MAX ) specs = _chunk_sharding_specs_list_for_test([0], seed=7) spec = specs[0] with self.assertRaisesRegex( NotImplementedError, "Only real partial tensor supported for reshard." ): self._run_partial_tensor_n_reshard( spec, [13, 21], 4, dist.ReduceOp.SUM, dtype=torch.cfloat ) self._run_partial_tensor_n_reshard( spec, [12, 22], 4, dist.ReduceOp.MAX, dtype=torch.cfloat )
import sys import torch import torch.distributed as dist from torch.distributed._shard.partial_tensor import ( _PartialTensor, ) from torch.distributed._shard.sharding_spec import ( ChunkShardingSpec, EnumerableShardingSpec, ShardMetadata, ) from torch.testing._internal.common_distributed import ( requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import ( TEST_WITH_DEV_DBG_ASAN, run_tests, ) from torch.testing._internal.distributed._shard.sharded_tensor import ( ShardedTensorTestBase, with_comms, TEST_GPU_NUM ) from torch.testing._internal.distributed._shard.sharded_tensor._test_st_common import ( _chunk_sharding_specs_list_for_test, ) class TestPartialTensorReshard(ShardedTensorTestBase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/distributed/_shard/test_partial_tensor.py
test_cat_errors
def test_cat_errors(self): with self.assertRaisesRegex( RuntimeError, 'All inputs need to be an instance of _PartialTensor' ): torch.cat([_PartialTensor(torch.rand(10)), torch.rand(10)]) with self.assertRaisesRegex( RuntimeError, 'reduce_ops need to be the same' ): torch.cat([_PartialTensor(torch.rand(10)), _PartialTensor(torch.rand(10), reduce_op=dist.ReduceOp.MAX)]) with self.assertRaisesRegex( RuntimeError, '"out" kwarg is not supported' ): torch.cat([_PartialTensor(torch.rand(10)), _PartialTensor(torch.rand(10))], out=torch.rand(10))
import sys import torch import torch.distributed as dist from torch.distributed._shard.partial_tensor import ( _PartialTensor, ) from torch.distributed._shard.sharding_spec import ( ChunkShardingSpec, EnumerableShardingSpec, ShardMetadata, ) from torch.testing._internal.common_distributed import ( requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import ( TEST_WITH_DEV_DBG_ASAN, run_tests, ) from torch.testing._internal.distributed._shard.sharded_tensor import ( ShardedTensorTestBase, with_comms, TEST_GPU_NUM ) from torch.testing._internal.distributed._shard.sharded_tensor._test_st_common import ( _chunk_sharding_specs_list_for_test, ) class TestPartialTensorOps(ShardedTensorTestBase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/distributed/_shard/sharding_plan/test_sharding_plan.py
test_shard_module_sub_process_group
def test_shard_module_sub_process_group(self): megatron_lm = SimpleMegatronLM([[17, 12], [12, 29]], rank=self.rank) colwise_sharding_spec = ChunkShardingSpec( dim=0, placements=[ "rank:0/cuda:2", "rank:1/cuda:3", ], ) rowwise_sharding_spec = ChunkShardingSpec( dim=1, placements=[ "rank:0/cuda:2", "rank:1/cuda:3", ], ) sharding_plan = ShardingPlan( plan={ "fc1.weight": colwise_sharding_spec, "fc2.weight": rowwise_sharding_spec } ) pg = dist.new_group([2, 3]) if self.rank >= 2: shard_module(megatron_lm, sharding_plan, process_group=pg)
def test_shard_module_sub_process_group(self): megatron_lm = SimpleMegatronLM([[17, 12], [12, 29]], rank=self.rank) colwise_sharding_spec = ChunkShardingSpec( dim=0, placements=[ "rank:2/cuda:2", "rank:3/cuda:3", ], ) rowwise_sharding_spec = ChunkShardingSpec( dim=1, placements=[ "rank:2/cuda:2", "rank:3/cuda:3", ], ) sharding_plan = ShardingPlan( plan={ "fc1.weight": colwise_sharding_spec, "fc2.weight": rowwise_sharding_spec, } ) pg = dist.new_group([2, 3]) if self.rank >= 2: shard_module(megatron_lm, sharding_plan, process_group=pg)
import sys import copy import torch import torch.nn as nn import torch.distributed as dist from torch.distributed._shard.sharded_optim import ( ShardedOptimizer, ) from torch.testing._internal.common_distributed import ( requires_nccl, skip_if_lt_x_gpu, ) from torch.distributed._shard import shard_module from torch.distributed._shard.sharding_plan import ShardingPlan, ShardingPlanner from torch.distributed._shard.sharding_spec import ChunkShardingSpec from torch.distributed._shard.sharded_tensor import ShardedTensor from torch.testing._internal.common_utils import ( TEST_WITH_DEV_DBG_ASAN, run_tests, ) from torch.testing._internal.distributed._shard.sharded_tensor import ( TEST_GPU_NUM, ShardedTensorTestBase, with_comms, ) from torch.testing._internal.distributed._shard.sharded_tensor._test_ops_common import ( generate_chunk_sharding_specs_for_test, generate_local_weight_sharding_params_for_test, ) from torch.testing._internal.distributed._shard.test_common import SimpleMegatronLM class TestShardingPlan(ShardedTensorTestBase):
import sys import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._shard import shard_module from torch.distributed._shard.sharded_tensor import ShardedTensor from torch.distributed._shard.sharding_plan import ShardingPlan, ShardingPlanner from torch.distributed._shard.sharding_spec import ChunkShardingSpec from torch.testing._internal.common_distributed import requires_nccl, skip_if_lt_x_gpu from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN from torch.testing._internal.distributed._shard.sharded_tensor import ( ShardedTensorTestBase, TEST_GPU_NUM, with_comms, ) from torch.testing._internal.distributed._shard.sharded_tensor._test_ops_common import ( generate_chunk_sharding_specs_for_test, ) from torch.testing._internal.distributed._shard.test_common import SimpleMegatronLM class TestShardingPlan(ShardedTensorTestBase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/_tensor/test_api.py
input_fn
def input_fn(inputs, device_mesh): return DTensor.from_local(inputs[0], device_mesh, [Shard(0)])
def input_fn(mod, inputs, device_mesh): return DTensor.from_local(inputs[0], device_mesh, [Shard(0)])
import torch import torch.nn as nn from torch.distributed._tensor import ( DeviceMesh, distribute_module, distribute_tensor, DTensor, Replicate, Shard, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, with_comms, )
import torch import torch.nn as nn from torch.distributed._tensor import ( DeviceMesh, distribute_module, distribute_tensor, DTensor, Replicate, Shard, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, with_comms, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/_tensor/test_api.py
output_fn
def output_fn(outputs, device_mesh): assert isinstance(outputs, DTensor) return outputs.to_local() replica_module = distribute_module( module_to_replicate, device_mesh, input_fn=input_fn, output_fn=output_fn, ) input_tensor = torch.randn(5, 20, device=self.device_type) local_out = replica_module(input_tensor) self.assertIsInstance(local_out, torch.Tensor) self.assertNotIsInstance(local_out, DTensor) # full replicate (even on inputs) model = MyModel(10, 10, device=self.device_type)
def output_fn(mod, outputs, device_mesh): assert isinstance(outputs, DTensor) return outputs.to_local() replica_module = distribute_module( module_to_replicate, device_mesh, input_fn=input_fn, output_fn=output_fn, ) input_tensor = torch.randn(5, 20, device=self.device_type) local_out = replica_module(input_tensor) self.assertIsInstance(local_out, torch.Tensor) self.assertNotIsInstance(local_out, DTensor) # full replicate (even on inputs) model = MyModel(10, 10, device=self.device_type)
import torch import torch.nn as nn from torch.distributed._tensor import ( DeviceMesh, distribute_module, distribute_tensor, DTensor, Replicate, Shard, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, with_comms, )
import torch import torch.nn as nn from torch.distributed._tensor import ( DeviceMesh, distribute_module, distribute_tensor, DTensor, Replicate, Shard, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, with_comms, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/_tensor/test_api.py
replicate_input_fn
def replicate_input_fn(inputs, device_mesh): return DTensor.from_local(inputs[0], device_mesh, [Replicate()]) replica_model = distribute_module( model, device_mesh, input_fn=replicate_input_fn, ) input = torch.randn(10, 10, requires_grad=True) output = replica_model(input) output.sum().backward() param_grad = list(replica_model.parameters())[0].grad self.assertTrue(isinstance(param_grad, DTensor)) self.assertTrue(isinstance(param_grad.placements[0], Replicate))
def replicate_input_fn(mod, inputs, device_mesh): return DTensor.from_local(inputs[0], device_mesh, [Replicate()]) replica_model = distribute_module( model, device_mesh, input_fn=replicate_input_fn, ) input = torch.randn(10, 10, requires_grad=True) output = replica_model(input) output.sum().backward() param_grad = next(iter(replica_model.parameters())).grad self.assertTrue(isinstance(param_grad, DTensor)) self.assertTrue(isinstance(param_grad.placements[0], Replicate))
import torch import torch.nn as nn from torch.distributed._tensor import ( DeviceMesh, distribute_module, distribute_tensor, DTensor, Replicate, Shard, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, with_comms, )
import torch import torch.nn as nn from torch.distributed._tensor import ( DeviceMesh, distribute_module, distribute_tensor, DTensor, Replicate, Shard, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, with_comms, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/_tensor/test_api.py
input_fn
def input_fn(inputs, device_mesh): return DTensor.from_local(inputs[0], device_mesh, [Shard(0)])
def input_fn(mod, inputs, device_mesh): return DTensor.from_local(inputs[0], device_mesh, [Shard(0)])
import torch import torch.nn as nn from torch.distributed._tensor import ( DeviceMesh, distribute_module, distribute_tensor, DTensor, Replicate, Shard, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, with_comms, )
import torch import torch.nn as nn from torch.distributed._tensor import ( DeviceMesh, distribute_module, distribute_tensor, DTensor, Replicate, Shard, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, with_comms, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/_tensor/test_api.py
output_fn
def output_fn(outputs, device_mesh): assert isinstance(outputs, DTensor) return outputs.to_local() replica_module = distribute_module( module_to_replicate, device_mesh, input_fn=input_fn, output_fn=output_fn, ) input_tensor = torch.randn(5, 20, device=self.device_type) local_out = replica_module(input_tensor) self.assertIsInstance(local_out, torch.Tensor) self.assertNotIsInstance(local_out, DTensor) # full replicate (even on inputs) model = MyModel(10, 10, device=self.device_type)
def output_fn(mod, outputs, device_mesh): assert isinstance(outputs, DTensor) return outputs.to_local() replica_module = distribute_module( module_to_replicate, device_mesh, input_fn=input_fn, output_fn=output_fn, ) input_tensor = torch.randn(5, 20, device=self.device_type) local_out = replica_module(input_tensor) self.assertIsInstance(local_out, torch.Tensor) self.assertNotIsInstance(local_out, DTensor) # full replicate (even on inputs) model = MyModel(10, 10, device=self.device_type)
import torch import torch.nn as nn from torch.distributed._tensor import ( DeviceMesh, distribute_module, distribute_tensor, DTensor, Replicate, Shard, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, with_comms, )
import torch import torch.nn as nn from torch.distributed._tensor import ( DeviceMesh, distribute_module, distribute_tensor, DTensor, Replicate, Shard, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, with_comms, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/_tensor/experimental/test_tp_transform.py
__init__
def __init__(self, num_mlps=3, bias=True): super().__init__() self.mlps = torch.nn.ModuleList() for _ in range(num_mlps): self.mlps.append( torch.nn.Sequential( torch.nn.Linear(6, 18), torch.nn.ReLU(), torch.nn.Linear(18, 6, bias=bias), ) )
from collections import defaultdict from typing import Dict import torch from torch.distributed._tensor.experimental._tp_transform import ( tensor_parallel_transformation, ) from torch.distributed.tensor.parallel.style import ( ColwiseParallel, ParallelStyle, RowwiseParallel, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, with_comms, ) class MLPListModule(torch.nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/experimental/test_tp_transform.py
forward
def forward(self, x: torch.Tensor) -> torch.Tensor: x = torch.chunk(x, 2, dim=1)[0] for mlp in self.mlps: x = mlp(x) return x + torch.ones_like(x)
from collections import defaultdict from typing import Dict import torch from torch.distributed._tensor.experimental._tp_transform import ( tensor_parallel_transformation, ) from torch.distributed.tensor.parallel.style import ( ColwiseParallel, ParallelStyle, RowwiseParallel, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, with_comms, ) class MLPListModule(torch.nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/experimental/test_tp_transform.py
test_tp_transform_no_bias
def test_tp_transform_no_bias(self): torch.manual_seed(0) model = MLPListModule(1, bias=False).to(device=self.device_type) inputs = (torch.randn((10, 12)).to(device=self.device_type),) parallel_strategies: Dict[str, ParallelStyle] = { "mlps.0.0": ColwiseParallel, "mlps.0.2": RowwiseParallel, } with torch.inference_mode(): res = model(*inputs) exported_program = torch.export.export( model, inputs, ).run_decompositions() tp_exported_program = tensor_parallel_transformation( exported_program, self.rank, self.world_size, self.device_type, parallel_strategies, ) tp_model = tp_exported_program.module() with torch.inference_mode(): tp_res = tp_model(*inputs) self.assertEqual(res, tp_res) self.assert_has_c10d_ops( tp_exported_program.graph_module, { "_c10d_functional.all_reduce.default": 1, "_c10d_functional.wait_tensor.default": 1, }, )
from collections import defaultdict from typing import Dict import torch from torch.distributed._tensor.experimental._tp_transform import ( tensor_parallel_transformation, ) from torch.distributed.tensor.parallel.style import ( ColwiseParallel, ParallelStyle, RowwiseParallel, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, with_comms, ) class TensorParallelTest(DTensorTestBase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_common_rules.py
test_reduction_rule
def test_reduction_rule(self): mesh = DeviceMesh(self.device_type, torch.arange(self.world_size)) func_schema = parse_schema( "aten::sum(Tensor self, *, ScalarType? dtype=None) -> Tensor" ) # reduction on a 2d mat mat1 = [0, -1] mat1_spec = DTensorSpec.from_dim_map(mesh, mat1, [], shape=torch.Size([8, 4])) # reduction on dim 0 output_sharding_0 = reduction_rule( OpSchema(func_schema, (mat1_spec, 0), {}), dims=[0], reduction_linear=True, ) self.assertIsNotNone(output_sharding_0.output_spec) self.assertEqual(output_sharding_0.output_spec.dim_map, [-1]) # pending sum on dim 0 self.assertEqual(output_sharding_0.output_spec.sums, [0]) self.assertEqual(output_sharding_0.output_spec.shape, torch.Size([4])) # reduction on dim 1 output_sharding_1 = reduction_rule( OpSchema(func_schema, (mat1_spec, 1), {}), dims=[1], reduction_linear=True, ) self.assertIsNotNone(output_sharding_1.output_spec) self.assertEqual(output_sharding_1.output_spec.dim_map, [0]) self.assertEqual(output_sharding_1.output_spec.sums, []) self.assertEqual(output_sharding_1.output_spec.shape, torch.Size([8])) # full reduction if not specify dim output_sharding_all_dim = reduction_rule( OpSchema(func_schema, (mat1_spec,), {}), dims=[0, 1], reduction_linear=True, ) self.assertIsNotNone(output_sharding_all_dim.output_spec) self.assertEqual(output_sharding_all_dim.output_spec.dim_map, []) # pending sum on mesh self.assertEqual(output_sharding_all_dim.output_spec.sums, [0]) self.assertEqual(output_sharding_all_dim.output_spec.shape, torch.Size([]))
if __name__ == "__main__": run_tests()
import torch from torch._C import parse_schema from torch.distributed._tensor import DeviceMesh from torch.distributed._tensor.op_schema import OpSchema from torch.distributed._tensor.ops.common_rules import ( einop_rule, pointwise_rule, reduction_rule, ) from torch.distributed._tensor.placement_types import DTensorSpec from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, with_comms, ) class CommonRulesTest(DTensorTestBase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/distributed/_tensor/test_convolution_ops.py
test_depthwise_convolution
def test_depthwise_convolution(self): device_mesh = DeviceMesh(self.device_type, list(range(self.world_size))) shard_spec = [Shard(3)] input_list = torch.rand(ITER_TIME, 7, 256, 128, 256) grad_output_list = torch.rand(ITER_TIME, 7, 256, 128, 256) * 1e-3 model = nn.Conv2d(256, 256, kernel_size=7, padding=3, groups=256).to( self.device_type ) nn.init.ones_(model.weight) nn.init.zeros_(model.bias) model_gt = copy.deepcopy(model).to(self.device_type) # training with dtensor model = distribute_module( model, device_mesh, _conv_fn, input_fn=None, output_fn=None ) optimizer = torch.optim.SGD(model.parameters(), lr=LR) for i in range(ITER_TIME): optimizer.zero_grad() inp = input_list[i].to(self.device_type).requires_grad_() inp_dtensor = distribute_tensor(inp, device_mesh, shard_spec) output = model(inp_dtensor) grad_output = grad_output_list[i].to(self.device_type) grad_output_dtensor = distribute_tensor( grad_output, device_mesh, shard_spec ) output.backward(grad_output_dtensor) optimizer.step() # training with plain tensor optimizer_gt = torch.optim.SGD(model_gt.parameters(), lr=LR) for i in range(ITER_TIME): optimizer_gt.zero_grad() inp = input_list[i].to(self.device_type).requires_grad_() output = model_gt(inp) grad_output = grad_output_list[i].to(self.device_type) output.backward(grad_output) optimizer_gt.step() weight_diff_abs = model.weight.to_local() - model_gt.weight bias_diff_abs = model.bias.to_local() - model_gt.bias weight_diff_rel = weight_diff_abs / (torch.abs(model_gt.weight) + 1e-8) bias_diff_rel = bias_diff_abs / (torch.abs(model_gt.bias) + 1e-8) weight_mse_abs = torch.mean(weight_diff_abs * weight_diff_abs).item() bias_mse_abs = torch.mean(bias_diff_abs * bias_diff_abs).item() weight_mse_rel = torch.mean(weight_diff_rel * weight_diff_rel).item() bias_mse_rel = torch.mean(bias_diff_rel * bias_diff_rel).item() self.assertTrue( weight_mse_abs <= 1e-6, f"Too large absolute mse for weight tensor, expected less equal 1e-6, got {weight_mse_abs}", ) self.assertTrue( bias_mse_abs <= 1e-6, f"Too large absolute mse for bias tensor, expected less equal 1e-6, got {bias_mse_abs}", ) self.assertTrue( weight_mse_rel <= 1e-6, f"Too large relative mse for weight tensor, expected less equal 1e-6, got {weight_mse_rel}", ) self.assertTrue( bias_mse_rel <= 1e-6, f"Too large relative mse for bias tensor, expected less equal 1e-6, got {bias_mse_rel}", )
import copy import torch import torch.nn as nn from torch.distributed._tensor import ( DeviceMesh, distribute_module, distribute_tensor, Replicate, Shard, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, skip_if_lt_x_gpu, with_comms, ) ITER_TIME = 10 LR = 0.001 class DistConvolutionOpsTest(DTensorTestBase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_device_mesh.py
_get_device_type_and_backend
def _get_device_type_and_backend(): device_type = "cuda" if torch.cuda.is_available() else "cpu" backend = "nccl" if device_type == "cuda" else "gloo" return device_type, backend
import os import sys import torch from torch.distributed._tensor.device_mesh import DeviceMesh from torch.distributed._tensor.placement_types import Shard from torch.distributed.distributed_c10d import ( get_global_rank, get_world_size, is_initialized, new_group, ProcessGroup, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, with_comms, ) from torch.testing._internal.common_distributed import TEST_SKIPS
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/distributed/_tensor/test_device_mesh.py
_set_env_var
def _set_env_var(addr="localhost", port="25364", world_size=1, rank=0): os.environ["MASTER_ADDR"] = addr os.environ["MASTER_PORT"] = port os.environ["WORLD_SIZE"] = f"{world_size}" os.environ["RANK"] = f"{rank}" class DeviceMeshTest(DTensorTestBase): @property def world_size(self): return 4 @with_comms def test_eligible_default_pg_for_mesh(self): mesh_tensor = torch.arange(self.world_size).reshape(2, -1) mesh = DeviceMesh(self.device_type, mesh_tensor) def test_ineligible_default_pg_for_mesh(self): device_type, backend = _get_device_type_and_backend() # skip the test if not enough GPUs if backend == "nccl" and torch.cuda.device_count() < self.world_size: sys.exit(TEST_SKIPS[f"multi-gpu-{self.world_size}"].exit_code) _set_env_var(world_size=self.world_size, rank=self.rank) # missing ranks mesh_tensor = torch.arange(self.world_size - 2).reshape(2, -1) with self.assertRaisesRegex(RuntimeError, "DeviceMesh must include every process in WORLD"): mesh = DeviceMesh(device_type, mesh_tensor) # mesh ranks are not unique mesh_tensor = torch.arange(self.world_size).reshape(2, -1) mesh_tensor[0][1] = 2 with self.assertRaisesRegex(RuntimeError, "DeviceMesh cannot have duplicate values"): mesh = DeviceMesh(device_type, mesh_tensor) # mesh ranks don't start from 0 mesh_tensor = torch.arange(start=1, end=(self.world_size + 1)).reshape(2, -1) with self.assertRaisesRegex(RuntimeError, "DeviceMesh ranks must start from 0"): mesh = DeviceMesh(device_type, mesh_tensor) # mesh ranks don't increment correctly mesh_tensor = torch.arange(start=0, end=(2 * self.world_size), step=2).reshape(2, -1) with self.assertRaisesRegex(RuntimeError, "DeviceMesh should have all ranks of WORLD"): mesh = DeviceMesh(device_type, mesh_tensor) def test_init_process_group(self): device_type, backend = _get_device_type_and_backend() # skip the test if not enough GPUs if backend == "nccl" and torch.cuda.device_count() < self.world_size: sys.exit(TEST_SKIPS[f"multi-gpu-{self.world_size}"].exit_code) mesh_tensor = torch.arange(4).reshape(2, 2) self.assertTrue(not is_initialized()) _set_env_var(world_size=self.world_size, rank=self.rank) mesh = DeviceMesh(device_type, mesh_tensor) self.assertTrue(is_initialized()) self.destroy_pg() @with_comms def test_device_mesh_2d(self): mesh_tensor = torch.arange(4).reshape(2, 2) # construct a cuda device mesh mesh = DeviceMesh(self.device_type, mesh_tensor) # check all dim groups dim_to_subgroups = mesh.get_dim_groups() expected_ranks_by_dim = [[[0, 2], [1, 3]], [[0, 1], [2, 3]]] for dim, dim_group in enumerate(dim_to_subgroups): self.assertTrue(dim < 2) dim_ranks = expected_ranks_by_dim[dim] dim_group_size = get_world_size(dim_group) self.assertIsInstance(dim_group, ProcessGroup) self.assertEqual(dim_group_size, 2) global_ranks = [ get_global_rank(dim_group, i) for i in range(dim_group_size) ] current_rank_expected_group_ranks = ( dim_ranks[0] if self.rank in dim_ranks[0] else dim_ranks[1] ) self.assertEqual(global_ranks, current_rank_expected_group_ranks) @with_comms def test_device_mesh_2d_from_dim_groups(self): # construct a two dimension subgroups dim_groups = [] expected_ranks_by_dim = [[[0, 2], [1, 3]], [[0, 1], [2, 3]]] for dim_group_ranks in expected_ranks_by_dim: for subgroup_ranks in dim_group_ranks: subgroup = new_group(ranks=subgroup_ranks) if self.rank in subgroup_ranks: dim_groups.append(subgroup) # construct a device mesh from the subgroups mesh = DeviceMesh(self.device_type, [[0, 1], [2, 3]], dim_groups=dim_groups) # check all dim groups dim_to_subgroups = mesh.get_dim_groups() for 
dim, dim_group in enumerate(dim_to_subgroups): self.assertTrue(dim < 2) dim_ranks = expected_ranks_by_dim[dim] dim_group_size = get_world_size(dim_group) self.assertIsInstance(dim_group, ProcessGroup) self.assertEqual(dim_group_size, 2) global_ranks = [ get_global_rank(dim_group, i) for i in range(dim_group_size) ] current_rank_expected_group_ranks = ( dim_ranks[0] if self.rank in dim_ranks[0] else dim_ranks[1] ) self.assertEqual(global_ranks, current_rank_expected_group_ranks) @with_comms def test_device_mesh_dim_groups_error(self): # construct a two dimension subgroups dim_groups = [] expected_ranks_by_dim = [[[0, 2], [1, 3]], [[0, 1], [2, 3]]] for dim_group_ranks in expected_ranks_by_dim: for subgroup_ranks in dim_group_ranks: subgroup = new_group(ranks=subgroup_ranks) if self.rank in subgroup_ranks: dim_groups.append(subgroup) if len(dim_groups) > 0: # dim_groups is not a list self.assertRaises( RuntimeError, DeviceMesh, self.device_type, [[0, 1], [2, 3]], dim_groups=dim_groups[0], ) # dim_groups is a list, but not a list of ProcessGroup self.assertRaises( RuntimeError, DeviceMesh, self.device_type, [[0, 1], [2, 3]], dim_groups=[dim_groups[0], "dummy"], ) # dim_groups has incorrect length self.assertRaises( RuntimeError, DeviceMesh, self.device_type, [[0, 1], [2, 3]], dim_groups=[dim_groups[0]], ) class DeviceMeshTestNDim(DTensorTestBase): @property def world_size(self): return 8 def test_mesh_size_requirement_error(self): device_type, backend = _get_device_type_and_backend() # skip the test if not enough GPUs if backend == "nccl" and torch.cuda.device_count() < self.world_size: sys.exit(TEST_SKIPS[f"multi-gpu-{self.world_size}"].exit_code) mesh_tensor = torch.arange(4).reshape(2, 2) _set_env_var(world_size=self.world_size, rank=self.rank) with self.assertRaisesRegex(RuntimeError, "DeviceMesh must include every process in WORLD"): mesh = DeviceMesh(device_type, mesh_tensor) self.assertTrue(not is_initialized()) @with_comms def test_device_mesh_nd(self): # construct a cuda device mesh mesh_tensor = torch.arange(8).reshape(2, 2, 2) mesh = DeviceMesh(self.device_type, mesh_tensor) # check all dim groups dim_to_subgroups = mesh.get_dim_groups() for dim, dim_group in enumerate(dim_to_subgroups): self.assertTrue(dim < mesh_tensor.ndim) dim_ranks = mesh_tensor.swapdims(-1, dim).reshape(-1, 2) # print(dim_ranks) # dim_ranks = expected_ranks_by_dim[dim] dim_group_size = get_world_size(dim_group) self.assertIsInstance(dim_group, ProcessGroup) self.assertEqual(dim_group_size, 2) global_ranks = [ get_global_rank(dim_group, i) for i in range(dim_group_size) ] for ranks in dim_ranks: if self.rank in ranks: self.assertEqual(global_ranks, ranks.tolist()) @with_comms def test_device_mesh_hash(self): mesh_tensor_2d = torch.arange(8).reshape(4, 2) mesh = DeviceMesh(self.device_type, mesh_tensor_2d) mesh2 = DeviceMesh(self.device_type, mesh_tensor_2d) self.assertNotEqual(hash(mesh), hash(mesh2)) mesh_tensor_3d = torch.arange(8).reshape(2, 2, 2) mesh3 = DeviceMesh(self.device_type, mesh_tensor_3d) self.assertNotEqual(hash(mesh), hash(mesh3)) self.assertNotEqual(hash(mesh2), hash(mesh3)) class DeviceMeshCollectiveTest(DTensorTestBase): @property def world_size(self): return 8 @with_comms def test_all_reduce_1d(self): mesh = DeviceMesh(self.device_type, torch.arange(self.world_size)) local_tensor = torch.ones(3, 3, device=self.device_type) * self.rank mesh.all_reduce(local_tensor, mesh_dim=0) res_num = ((0 + self.world_size - 1) * self.world_size) / 2 self.assertEqual(local_tensor, torch.ones(3, 3) * 
res_num) @with_comms def test_broadcast_1d(self): mesh = DeviceMesh(self.device_type, torch.arange(self.world_size)) local_tensor = torch.ones(3, 3, device=self.device_type) * self.rank mesh.broadcast(local_tensor, mesh_dim=0) self.assertEqual(local_tensor, torch.zeros(3, 3)) @with_comms def test_scatter_1d(self): mesh = DeviceMesh(self.device_type, torch.arange(self.world_size)) scatter_tensor_shape = [3, 3, 3] for scatter_dim in range(len(scatter_tensor_shape)): shard_placement = Shard(scatter_dim) scatter_tensor_shape[scatter_dim] *= self.world_size # make the random seed same across rank torch.manual_seed(0) global_tensor = torch.randn(scatter_tensor_shape, device=self.device_type) splitted_list, _ = shard_placement._split_tensor( global_tensor, mesh.size(), with_padding=True, contiguous=True ) recv_tensor = torch.empty_like(splitted_list[mesh.get_rank()]) # scatter on dim > 0 would generate non-contiguous tensor, verify that works mesh.scatter(recv_tensor, splitted_list, mesh_dim=0) self.assertEqual(recv_tensor, splitted_list[mesh.get_rank()]) @with_comms def test_scatter_uneven(self): device_mesh = DeviceMesh(self.device_type, list(range(self.world_size))) my_rank = device_mesh.get_rank() tensor_to_split = torch.randn( device_mesh.size() + 3, device_mesh.size() + 1, device=self.device_type ) for shard_dim in range(tensor_to_split.ndim): shard_placement = Shard(shard_dim) tensor_to_scatter = tensor_to_split.clone() tensor_splitted_list = tensor_to_split.tensor_split( device_mesh.size(), dim=shard_dim ) padded_tensor_list, pad_idx = shard_placement._split_tensor( tensor_to_scatter, device_mesh.size(), with_padding=True, contiguous=True, ) scattered_tensor = torch.empty_like(padded_tensor_list[my_rank]) device_mesh.scatter(scattered_tensor, padded_tensor_list, mesh_dim=0) # unpad scattered_tensor if pad_idx != 0 and my_rank >= pad_idx: scattered_tensor = shard_placement._unpad_tensor(scattered_tensor) self.assertEqual( scattered_tensor.size(), tensor_splitted_list[my_rank].size() ) self.assertEqual(scattered_tensor, tensor_splitted_list[my_rank]) @with_comms def test_all_gather_1d(self): mesh = DeviceMesh(self.device_type, torch.arange(self.world_size)) dims_to_gather = [0, 1] for dim in dims_to_gather: output_size = [3, 3] output_size[dim] *= self.world_size # each rank have its own tensor, all_gather gives a list local_tensor = torch.ones(3, 3, device=self.device_type) gathered_list = [] for _ in range(self.world_size): gathered_list.append(torch.zeros_like(local_tensor)) mesh.all_gather(gathered_list, local_tensor, mesh_dim=0) gathered_tensor = torch.cat(gathered_list, dim=dim) self.assertEqual(gathered_tensor, torch.ones(output_size)) @with_comms def test_all_gather_uneven(self): device_mesh = DeviceMesh(self.device_type, list(range(self.world_size))) my_rank = device_mesh.get_rank() tensor_to_split = torch.ones( device_mesh.size() + 3, device_mesh.size() + 1, device=self.device_type, ) for shard_dim in range(tensor_to_split.ndim): shard_placement = Shard(shard_dim) tensor_padded_list, pad_idx = shard_placement._split_tensor( tensor_to_split, device_mesh.size(), with_padding=True, contiguous=True, ) local_tensor = tensor_padded_list[my_rank] gathered_list = [] for _ in range(device_mesh.size()): gathered_list.append(torch.empty_like(local_tensor)) device_mesh.all_gather( gathered_list, local_tensor, mesh_dim=0, ) if pad_idx != 0: gathered_list = [ shard_placement._unpad_tensor(gathered_tensor) if i >= pad_idx else gathered_tensor for i, gathered_tensor in enumerate(gathered_list) ] 
            all_gathered_tensor = torch.cat(gathered_list, dim=shard_dim)
            self.assertEqual(all_gathered_tensor.size(), tensor_to_split.size())
            self.assertEqual(all_gathered_tensor, tensor_to_split)

    @with_comms
    def test_reduce_scatter_1d(self):
        mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
        dims_to_scatter = [0, 1]
        for dim in dims_to_scatter:
            input_size = [3, 3]
            scattered_tensor = torch.empty(input_size, device=self.device_type)
            input_size[dim] *= self.world_size
            shard_placement = Shard(dim)
            input_rs_list, _ = shard_placement._split_tensor(
                torch.ones(input_size, device=self.device_type) * self.rank,
                mesh.size(),
                with_padding=True,
                contiguous=True,
            )
            res_num = ((0 + self.world_size - 1) * self.world_size) / 2
            mesh.reduce_scatter(scattered_tensor, input_rs_list, mesh_dim=0)
            self.assertEqual(scattered_tensor, torch.ones(3, 3) * res_num)

    @with_comms
    def test_reduce_scatter_uneven(self):
        device_mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
        my_rank = device_mesh.get_rank()
        tensor_to_split = (
            torch.ones(
                device_mesh.size() + 3,
                device_mesh.size() + 1,
                device=self.device_type,
            )
            * self.rank
        )

        for shard_dim in range(tensor_to_split.ndim):
            shard_placement = Shard(shard_dim)
            tensor_to_scatter = tensor_to_split.clone()
            tensor_splitted_list = tensor_to_split.tensor_split(
                device_mesh.size(), dim=shard_dim
            )
            padded_tensor_list, pad_idx = shard_placement._split_tensor(
                tensor_to_scatter,
                device_mesh.size(),
                with_padding=True,
                contiguous=True,
            )

            res_num = ((0 + self.world_size - 1) * self.world_size) / 2
            scattered_tensor = torch.empty_like(padded_tensor_list[my_rank])
            device_mesh.reduce_scatter(scattered_tensor, padded_tensor_list, mesh_dim=0)

            # unpad scattered_tensor
            if pad_idx != 0 and my_rank >= pad_idx:
                scattered_tensor = shard_placement._unpad_tensor(scattered_tensor)

            self.assertEqual(
                scattered_tensor.size(), tensor_splitted_list[my_rank].size()
            )
            self.assertEqual(
                scattered_tensor,
                torch.ones_like(tensor_splitted_list[my_rank]) * res_num,
            )

    @with_comms
    def test_all_gather_nd(self):
        mesh_tensor = torch.arange(8).reshape(2, 2, 2)
        mesh = DeviceMesh(self.device_type, mesh_tensor)
        local_tensor = torch.ones(3, 3, device=self.device_type) * self.rank

        dim_to_subgroups = mesh.get_dim_groups()
        for dim, dim_group in enumerate(dim_to_subgroups):
            dim_group_size = get_world_size(dim_group)
            global_ranks = [
                get_global_rank(dim_group, i) for i in range(dim_group_size)
            ]
            gathered_tensor_list = list(
                torch.empty(
                    (dim_group_size * 3, 3), device=self.device_type
                ).tensor_split(dim_group_size, dim=0)
            )
            mesh.all_gather(gathered_tensor_list, local_tensor, mesh_dim=dim)
            gathered_tensor = torch.cat(gathered_tensor_list)
            exp_tensor = torch.ones(3 * dim_group_size, 3)
            for i in range(len(global_ranks)):
                exp_tensor[i * 3 : (i + 1) * 3] = torch.ones(3, 3) * global_ranks[i]
            self.assertEqual(gathered_tensor, exp_tensor)

    @with_comms
    def test_reduce_scatter_nd(self):
        mesh_tensor = torch.arange(8).reshape(2, 2, 2)
        mesh = DeviceMesh(self.device_type, mesh_tensor)

        dim_to_subgroups = mesh.get_dim_groups()
        for dim, dim_group in enumerate(dim_to_subgroups):
            input_size = [3, 3, 3]
            dim_group_size = get_world_size(dim_group)
            input_size[dim] *= dim_group_size
            shard_placement = Shard(dim)
            local_rs_list, _ = shard_placement._split_tensor(
                torch.ones(input_size, device=self.device_type) * self.rank,
                dim_group_size,
                with_padding=True,
                contiguous=True,
            )
            scattered_tensor = torch.empty_like(
                local_rs_list[mesh.get_coordinate_on_dim(dim)],
                device=self.device_type,
            )
            global_ranks = [
                get_global_rank(dim_group, i) for i in range(dim_group_size)
            ]
            mesh.reduce_scatter(scattered_tensor, local_rs_list, mesh_dim=dim)
            res_num = torch.sum(torch.tensor(global_ranks))
            self.assertEqual(scattered_tensor, torch.ones(3, 3, 3) * res_num)

    @with_comms
    def test_all_reduce_nd(self):
        mesh_tensor = torch.arange(8).reshape(2, 2, 2)
        mesh = DeviceMesh(self.device_type, mesh_tensor)
        local_tensor = torch.ones(3, 3, device=self.device_type) * self.rank

        # check all dim groups
        dim_to_subgroups = mesh.get_dim_groups()
        for dim, dim_group in enumerate(dim_to_subgroups):
            dim_group_size = get_world_size(dim_group)
            global_ranks = [
                get_global_rank(dim_group, i) for i in range(dim_group_size)
            ]
            cloned_local_tensor = local_tensor.clone()
            mesh.all_reduce(cloned_local_tensor, mesh_dim=dim)
            res_num = sum(global_ranks)
            self.assertEqual(cloned_local_tensor, torch.ones(3, 3) * res_num)

    @with_comms
    def test_broadcast_nd(self):
        mesh_tensor = torch.arange(8).reshape(2, 2, 2)
        mesh = DeviceMesh(self.device_type, mesh_tensor)
        local_tensor = torch.ones(3, 3, device=self.device_type) * self.rank

        # check all dim groups
        dim_to_subgroups = mesh.get_dim_groups()
        for dim, dim_group in enumerate(dim_to_subgroups):
            dim_group_size = get_world_size(dim_group)
            global_ranks = [
                get_global_rank(dim_group, i) for i in range(dim_group_size)
            ]
            cloned_local_tensor = local_tensor.clone()
            mesh.broadcast(cloned_local_tensor, mesh_dim=dim)
            res_num = global_ranks[0]
            self.assertEqual(cloned_local_tensor, torch.ones(3, 3) * res_num)

    @with_comms
    def test_scatter_nd(self):
        mesh_tensor = torch.arange(8).reshape(2, 2, 2)
        mesh = DeviceMesh(self.device_type, mesh_tensor)

        # check all dim groups
        dim_to_subgroups = mesh.get_dim_groups()
        for dim, dim_group in enumerate(dim_to_subgroups):
            dim_group_size = get_world_size(dim_group)
            global_ranks = [
                get_global_rank(dim_group, i) for i in range(dim_group_size)
            ]
            scattered_tensors = [
                torch.ones(3, 3, device=self.device_type) * global_rank
                for global_rank in global_ranks
            ]
            received_tensor = torch.empty_like(
                scattered_tensors[mesh.get_coordinate_on_dim(dim)]
            )
            mesh.scatter(received_tensor, scattered_tensors, mesh_dim=dim)
            self.assertEqual(received_tensor, torch.ones(3, 3) * self.rank)

    @with_comms
    def test_all_to_all_1d(self):
        # transpose on a 2D tensor distributed over N nodes:
        mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
        tensor_shape = [3, 3]
        input_tensor_list = [
            torch.ones(*tensor_shape, device=self.device_type)
            * (rank + self.rank * self.world_size)
            for rank in range(self.world_size)
        ]
        expected_tensor_list = [
            torch.ones(tensor_shape, device=self.device_type)
            * (self.rank + rank * self.world_size)  # i.e. transpose
            for rank in range(self.world_size)
        ]
        for scatter_dim in range(len(tensor_shape)):
            output_tensor_list = [
                torch.empty_like(input_tensor_list[idx])
                for idx in range(len(input_tensor_list))
            ]
            # scatter on dim > 0 would generate non-contiguous tensor, verify that works
            mesh.all_to_all(output_tensor_list, input_tensor_list, mesh_dim=0)
            output_tensor = torch.cat(output_tensor_list, dim=scatter_dim)
            expected_tensor = torch.cat(expected_tensor_list, dim=scatter_dim)
            self.assertEqual(output_tensor, expected_tensor)

    @with_comms
    def test_all_to_all_nd(self):
        mesh_tensor = torch.arange(8).reshape(2, 2, 2)
        mesh = DeviceMesh(self.device_type, mesh_tensor)
        tensor_shape = [3, 3, 3]
        # check all dim groups
        dim_to_subgroups = mesh.get_dim_groups()
        for dim, dim_group in enumerate(dim_to_subgroups):
            my_coordinate = mesh.get_coordinate_on_dim(dim)
            dim_group_size = get_world_size(dim_group)
            global_ranks = [
                get_global_rank(dim_group, i) for i in range(dim_group_size)
            ]
            input_tensor_list = [
                torch.ones(*tensor_shape, device=self.device_type)
                * (i + self.rank * dim_group_size)
                for i in range(dim_group_size)
            ]
            expected_tensor_list = [
                torch.ones(*tensor_shape, device=self.device_type)
                * (my_coordinate + global_rank * dim_group_size)  # i.e. transpose
                for global_rank in global_ranks
            ]
            for scatter_dim in range(len(tensor_shape)):
                # input_tensor = torch.cat(input_tensor_list, dim=scatter_dim)
                output_tensor_list = [
                    torch.empty_like(input_tensor_list[idx])
                    for idx in range(len(input_tensor_list))
                ]
                # scatter on dim > 0 would generate non-contiguous tensor, verify that works
                mesh.all_to_all(output_tensor_list, input_tensor_list, mesh_dim=dim)
                output_tensor = torch.cat(output_tensor_list, dim=scatter_dim)
                expected_tensor = torch.cat(expected_tensor_list, dim=scatter_dim)
                self.assertEqual(output_tensor, expected_tensor)


if __name__ == "__main__":
    run_tests()
import os import sys import torch from torch.distributed._tensor.device_mesh import DeviceMesh from torch.distributed._tensor.placement_types import Shard from torch.distributed.distributed_c10d import ( get_global_rank, get_world_size, is_initialized, new_group, ProcessGroup, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, with_comms, ) from torch.testing._internal.common_distributed import TEST_SKIPS
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/distributed/_tensor/test_api.py
test_distribute_module_meta
def test_distribute_module_meta(self):
    # If the model is too big, the user may first create the entire model on
    # the meta device and then initialize it on the device in the partition function.
    device_mesh = DeviceMesh(self.device_type, list(range(self.world_size)))

    # fully shard all parameters on dim 0
    module_to_shard = MyModel(5 * self.world_size, 20, device="meta")
    shard_spec = [Shard(0)]

    def shard_fn(name, module, device_mesh):
        for param_name, param in module._parameters.items():
            dist_param = distribute_tensor(param, device_mesh, shard_spec)
            dist_param = torch.empty_like(
                dist_param, device=device_mesh.device_type
            )
            module.register_parameter(param_name, torch.nn.Parameter(dist_param))

    sharded_module = distribute_module(module_to_shard, device_mesh, shard_fn)
    for param in sharded_module.parameters():
        self.assertIsInstance(param, DTensor)
        self.assertFalse(param.is_meta)
        self.assertTrue(param.device.type == device_mesh.device_type)
import torch import torch.nn as nn from torch.distributed._tensor import ( DeviceMesh, distribute_module, distribute_tensor, DTensor, Replicate, Shard, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, with_comms, ) class DTensorAPITest(DTensorTestBase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_device_mesh.py
test_all_to_all_nd
def test_all_to_all_nd(self):
    mesh_tensor = torch.arange(8).reshape(2, 2, 2)
    mesh = DeviceMesh(self.device_type, mesh_tensor)
    tensor_shape = [3, 3, 3]
    # check all dim groups
    dim_to_subgroups = mesh.get_dim_groups()
    for dim, dim_group in enumerate(dim_to_subgroups):
        my_coordinate = mesh.get_coordinate_on_dim(dim)
        dim_group_size = get_world_size(dim_group)
        global_ranks = [
            get_global_rank(dim_group, i) for i in range(dim_group_size)
        ]
        input_tensor_list = [
            torch.ones(*tensor_shape, device=self.device_type)
            * (i + self.rank * dim_group_size)
            for i in range(dim_group_size)
        ]
        expected_tensor_list = [
            torch.ones(*tensor_shape, device=self.device_type)
            * (my_coordinate + global_rank * dim_group_size)  # i.e. transpose
            for global_rank in global_ranks
        ]
        for scatter_dim in range(len(tensor_shape)):
            # input_tensor = torch.cat(input_tensor_list, dim=scatter_dim)
            output_tensor_list = [
                torch.empty_like(input_tensor_list[idx])
                for idx in range(len(input_tensor_list))
            ]
            # scatter on dim > 0 would generate non-contiguous tensor, verify that works
            mesh.all_to_all(output_tensor_list, input_tensor_list, mesh_dim=dim)
            output_tensor = torch.cat(output_tensor_list, dim=scatter_dim)
            expected_tensor = torch.cat(expected_tensor_list, dim=scatter_dim)
            self.assertEqual(output_tensor, expected_tensor)
import os import sys import torch from torch.distributed._tensor.device_mesh import DeviceMesh from torch.distributed._tensor.placement_types import Shard from torch.distributed.distributed_c10d import ( get_global_rank, get_world_size, is_initialized, new_group, ProcessGroup, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, with_comms, ) from torch.testing._internal.common_distributed import TEST_SKIPS class DeviceMeshCollectiveTest(DTensorTestBase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/distributed/_tensor/test_dtensor_compile.py
test_dtensor_constructor_w_graph_break
def test_dtensor_constructor_w_graph_break(self):
    mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
    x = torch.randn(64, 32, requires_grad=True)
    spec = DTensorSpec(
        mesh,
        (Replicate(), Shard(0)),
        tensor_meta=TensorMeta(
            shape=torch.Size([128, 32]), stride=(32, 1), dtype=x.dtype
        ),
    )

    # test passing in DTensor as inputs/outputs and run some tensor computation
    def fn(x):
        print("graph break!")
        return DTensor(
            x,
            spec,
            requires_grad=x.requires_grad,
        )

    out = fn(x)
    out2 = torch.compile(fn, backend="eager")(x)
import copy import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.testing import torch.distributed as dist import torch.nn as nn from torch._C import FileCheck from torch._inductor.utils import run_and_get_triton_code from torch.distributed._tensor import ( DeviceMesh, DTensor, init_device_mesh, Partial, Replicate, Shard, ) from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper, CheckpointImpl, ) from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, PrepareModuleInput, PrepareModuleOutput, RowwiseParallel, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, MLPModule, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.utils.checkpoint import checkpoint fw_graph_cell = [None] bw_graph_cell = [None] fw_compiler = functools.partial(extract_graph, graph_cell=fw_graph_cell) bw_compiler = functools.partial(extract_graph, graph_cell=bw_graph_cell) from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd aot_eager_graph = aot_autograd( fw_compiler=fw_compiler, bw_compiler=bw_compiler, partition_fn=min_cut_rematerialization_partition, ) class TestDTensorCompile(torch._dynamo.test_case.TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_dtensor_compile.py
fn
def fn(x):
    a = 0
    if x.is_replicate():
        a += 1
    if x.is_shard():
        a += 2
        if x.dim < 0:
            raise RuntimeError("dim < 0")
    if x.is_shard(0):
        a += 2
    if x.is_shard(dim=0):
        a += 2
    if x.is_shard(dim=None):
        a += 2
    if x.is_partial():
        a += 3
    return a

compiled_fn = torch.compile(backend="aot_eager", fullgraph=True)(fn)

for x in [Shard(0), Replicate(), Partial()]:
    opt_fn = fn(x)
    compiled_out = compiled_fn(x)
    self.assertEqual(opt_fn, compiled_out)
import copy import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.testing import torch.distributed as dist import torch.nn as nn from torch._C import FileCheck from torch._inductor.utils import run_and_get_triton_code from torch.distributed._tensor import ( DeviceMesh, DTensor, init_device_mesh, Partial, Replicate, Shard, ) from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper, CheckpointImpl, ) from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, PrepareModuleInput, PrepareModuleOutput, RowwiseParallel, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, MLPModule, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.utils.checkpoint import checkpoint fw_graph_cell = [None] bw_graph_cell = [None] fw_compiler = functools.partial(extract_graph, graph_cell=fw_graph_cell) bw_compiler = functools.partial(extract_graph, graph_cell=bw_graph_cell) from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd aot_eager_graph = aot_autograd( fw_compiler=fw_compiler, bw_compiler=bw_compiler, partition_fn=min_cut_rematerialization_partition, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_dtensor_compile.py
test_dtensor_constructor_w_dynamo_disable
def test_dtensor_constructor_w_dynamo_disable(self):
    mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
    x = torch.randn(32, requires_grad=True)
    spec = DTensorSpec(
        mesh,
        (Replicate(),),
        tensor_meta=TensorMeta(shape=torch.Size([32]), stride=(1,), dtype=x.dtype),
    )

    @torch._dynamo.disable(recursive=False)
    def fn(x):
        print("foo")
        return DTensor(
            x,
            spec,
            requires_grad=x.requires_grad,
        )

    out = fn(x)
    out2 = torch.compile(fn, backend="eager")(x)
    self.assertEqual(out, out2)
import copy import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.testing import torch.distributed as dist import torch.nn as nn from torch._C import FileCheck from torch._inductor.utils import run_and_get_triton_code from torch.distributed._tensor import ( DeviceMesh, DTensor, init_device_mesh, Partial, Replicate, Shard, ) from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper, CheckpointImpl, ) from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, PrepareModuleInput, PrepareModuleOutput, RowwiseParallel, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, MLPModule, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.utils.checkpoint import checkpoint fw_graph_cell = [None] bw_graph_cell = [None] fw_compiler = functools.partial(extract_graph, graph_cell=fw_graph_cell) bw_compiler = functools.partial(extract_graph, graph_cell=bw_graph_cell) from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd aot_eager_graph = aot_autograd( fw_compiler=fw_compiler, bw_compiler=bw_compiler, partition_fn=min_cut_rematerialization_partition, ) class TestDTensorCompile(torch._dynamo.test_case.TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_dtensor_compile.py
fn
def fn(x):
    a = 0
    if x.is_replicate():
        a += 1
    if x.is_shard():
        a += 2
        if x.dim < 0:
            raise RuntimeError("dim < 0")
    if x.is_shard(0):
        a += 2
    if x.is_shard(dim=0):
        a += 2
    if x.is_shard(dim=None):
        a += 2
    if x.is_partial():
        a += 3
    return a

compiled_fn = torch.compile(backend="aot_eager", fullgraph=True)(fn)

for x in [Shard(0), Replicate(), Partial()]:
    opt_fn = fn(x)
    compiled_out = compiled_fn(x)
    self.assertEqual(opt_fn, compiled_out)
import copy import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.testing import torch.distributed as dist import torch.nn as nn from torch._C import FileCheck from torch._inductor.utils import run_and_get_triton_code from torch.distributed._tensor import ( DeviceMesh, DTensor, init_device_mesh, Partial, Replicate, Shard, ) from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper, CheckpointImpl, ) from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, PrepareModuleInput, PrepareModuleOutput, RowwiseParallel, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, MLPModule, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.utils.checkpoint import checkpoint fw_graph_cell = [None] bw_graph_cell = [None] fw_compiler = functools.partial(extract_graph, graph_cell=fw_graph_cell) bw_compiler = functools.partial(extract_graph, graph_cell=bw_graph_cell) from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd aot_eager_graph = aot_autograd( fw_compiler=fw_compiler, bw_compiler=bw_compiler, partition_fn=min_cut_rematerialization_partition, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_dtensor_compile.py
test_dtensor_noncontiguous_output
def test_dtensor_noncontiguous_output(self):
    mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))

    # test passing in DTensor as inputs/outputs and run some tensor computation
    def fn(x, y, z):
        x_transposed = x.permute(0, 2, 1).contiguous()
        tmp = torch._C._nn.linear(x_transposed, y, z)
        return tmp.permute(0, 2, 1)

    x_inner = torch.randn(4, 16, 4, requires_grad=True)
    y_inner = torch.randn(4, 16, requires_grad=True)
    z_inner = torch.randn(4, requires_grad=True)
    x = DTensor.from_local(x_inner, mesh, [Shard(1)], run_check=False)
    y = DTensor.from_local(y_inner, mesh, [Shard(1)], run_check=False)
    z = DTensor.from_local(z_inner, mesh, [Replicate()], run_check=False)
    out = torch.compile(fn, backend="aot_eager", fullgraph=True)(x, y, z)
    out.contiguous().sum().backward()
import copy import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.testing import torch.distributed as dist import torch.nn as nn from torch._C import FileCheck from torch._inductor.utils import run_and_get_triton_code from torch.distributed._tensor import ( DeviceMesh, DTensor, init_device_mesh, Partial, Replicate, Shard, ) from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper, CheckpointImpl, ) from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, PrepareModuleInput, PrepareModuleOutput, RowwiseParallel, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, MLPModule, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.utils.checkpoint import checkpoint fw_graph_cell = [None] bw_graph_cell = [None] fw_compiler = functools.partial(extract_graph, graph_cell=fw_graph_cell) bw_compiler = functools.partial(extract_graph, graph_cell=bw_graph_cell) from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd aot_eager_graph = aot_autograd( fw_compiler=fw_compiler, bw_compiler=bw_compiler, partition_fn=min_cut_rematerialization_partition, ) class TestDTensorCompile(torch._dynamo.test_case.TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_dtensor_compile.py
fn
def fn(x):
    a = 0
    if x.is_replicate():
        a += 1
    if x.is_shard():
        a += 2
        if x.dim < 0:
            raise RuntimeError("dim < 0")
    if x.is_shard(0):
        a += 2
    if x.is_shard(dim=0):
        a += 2
    if x.is_shard(dim=None):
        a += 2
    if x.is_partial():
        a += 3
    return a

compiled_fn = torch.compile(backend="aot_eager", fullgraph=True)(fn)

for x in [Shard(0), Replicate(), Partial()]:
    opt_fn = fn(x)
    compiled_out = compiled_fn(x)
    self.assertEqual(opt_fn, compiled_out)
import copy import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.testing import torch.distributed as dist import torch.nn as nn from torch._C import FileCheck from torch._inductor.utils import run_and_get_triton_code from torch.distributed._tensor import ( DeviceMesh, DTensor, init_device_mesh, Partial, Replicate, Shard, ) from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper, CheckpointImpl, ) from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, PrepareModuleInput, PrepareModuleOutput, RowwiseParallel, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, MLPModule, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.utils.checkpoint import checkpoint fw_graph_cell = [None] bw_graph_cell = [None] fw_compiler = functools.partial(extract_graph, graph_cell=fw_graph_cell) bw_compiler = functools.partial(extract_graph, graph_cell=bw_graph_cell) from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd aot_eager_graph = aot_autograd( fw_compiler=fw_compiler, bw_compiler=bw_compiler, partition_fn=min_cut_rematerialization_partition, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_dtensor_compile.py
test_dynamo_dtensor_from_local
def test_dynamo_dtensor_from_local(self):
    mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))

    # create DTensor inside fn and run some compute
    def fn(x):
        dt = DTensor.from_local(x, mesh, [Replicate()], run_check=False)
        return dt.to_local() + 2

    # below is the op approach for reference
    # from torch.distributed._tensor.api import _FromTorchTensor
    # def from_local_tensor(x):
    #     return _FromTorchTensor.apply(x, mesh, [Replicate()], False)
    # _dt_lib_def = torch.library.Library("dtensor", "DEF")
    # _dt_lib_def.define("from_local(Tensor self) -> Tensor")
    # _dt_lib_impl = torch.library.Library("dtensor", "IMPL")
    # _dt_lib_impl.impl("from_local", from_local_tensor, "Autograd")

    x = torch.ones(1, requires_grad=True)
    ref = fn(x)
    cnt = torch._dynamo.testing.CompileCounterWithBackend("aot_eager")
    opt_fn = torch.compile(fn, backend=cnt, fullgraph=True)
    res = opt_fn(x)
    # backward should work as well
    res.sum().backward()

    self.assertEqual(res, ref)
    self.assertEqual(cnt.frame_count, 1)

    # test if user calls from_local with mesh/placements as kwargs and that should still work
    def from_local_kwargs_fn(x):
        dt = DTensor.from_local(
            x, device_mesh=mesh, placements=[Replicate()], run_check=False
        )
        return dt.to_local() + 2

    ref = from_local_kwargs_fn(x)
    opt_kwargs_fn = torch.compile(from_local_kwargs_fn, backend=cnt, fullgraph=True)
    res = opt_kwargs_fn(x)
    self.assertEqual(res, ref)
    self.assertEqual(cnt.frame_count, 2)
import copy import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.testing import torch.distributed as dist import torch.nn as nn from torch._C import FileCheck from torch._inductor.utils import run_and_get_triton_code from torch.distributed._tensor import ( DeviceMesh, DTensor, init_device_mesh, Partial, Replicate, Shard, ) from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper, CheckpointImpl, ) from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, PrepareModuleInput, PrepareModuleOutput, RowwiseParallel, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, MLPModule, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.utils.checkpoint import checkpoint fw_graph_cell = [None] bw_graph_cell = [None] fw_compiler = functools.partial(extract_graph, graph_cell=fw_graph_cell) bw_compiler = functools.partial(extract_graph, graph_cell=bw_graph_cell) from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd aot_eager_graph = aot_autograd( fw_compiler=fw_compiler, bw_compiler=bw_compiler, partition_fn=min_cut_rematerialization_partition, ) class TestDTensorCompile(torch._dynamo.test_case.TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_dtensor_compile.py
fn
def fn(x):
    a = 0
    if x.is_replicate():
        a += 1
    if x.is_shard():
        a += 2
        if x.dim < 0:
            raise RuntimeError("dim < 0")
    if x.is_shard(0):
        a += 2
    if x.is_shard(dim=0):
        a += 2
    if x.is_shard(dim=None):
        a += 2
    if x.is_partial():
        a += 3
    return a

compiled_fn = torch.compile(backend="aot_eager", fullgraph=True)(fn)

for x in [Shard(0), Replicate(), Partial()]:
    opt_fn = fn(x)
    compiled_out = compiled_fn(x)
    self.assertEqual(opt_fn, compiled_out)
import copy import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.testing import torch.distributed as dist import torch.nn as nn from torch._C import FileCheck from torch._inductor.utils import run_and_get_triton_code from torch.distributed._tensor import ( DeviceMesh, DTensor, init_device_mesh, Partial, Replicate, Shard, ) from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper, CheckpointImpl, ) from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, PrepareModuleInput, PrepareModuleOutput, RowwiseParallel, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, MLPModule, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.utils.checkpoint import checkpoint fw_graph_cell = [None] bw_graph_cell = [None] fw_compiler = functools.partial(extract_graph, graph_cell=fw_graph_cell) bw_compiler = functools.partial(extract_graph, graph_cell=bw_graph_cell) from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd aot_eager_graph = aot_autograd( fw_compiler=fw_compiler, bw_compiler=bw_compiler, partition_fn=min_cut_rematerialization_partition, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_dtensor_compile.py
from_local_kwargs_fn
def from_local_kwargs_fn(x):
    dt = DTensor.from_local(
        x, device_mesh=mesh, placements=[Replicate()], run_check=False
    )
    return dt.to_local() + 2

ref = from_local_kwargs_fn(x)
opt_kwargs_fn = torch.compile(from_local_kwargs_fn, backend=cnt, fullgraph=True)
res = opt_kwargs_fn(x)
self.assertEqual(res, ref)
self.assertEqual(cnt.frame_count, 2)
import copy import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.testing import torch.distributed as dist import torch.nn as nn from torch._C import FileCheck from torch._inductor.utils import run_and_get_triton_code from torch.distributed._tensor import ( DeviceMesh, DTensor, init_device_mesh, Partial, Replicate, Shard, ) from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper, CheckpointImpl, ) from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, PrepareModuleInput, PrepareModuleOutput, RowwiseParallel, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, MLPModule, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.utils.checkpoint import checkpoint fw_graph_cell = [None] bw_graph_cell = [None] fw_compiler = functools.partial(extract_graph, graph_cell=fw_graph_cell) bw_compiler = functools.partial(extract_graph, graph_cell=bw_graph_cell) from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd aot_eager_graph = aot_autograd( fw_compiler=fw_compiler, bw_compiler=bw_compiler, partition_fn=min_cut_rematerialization_partition, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_dtensor_compile.py
test_dynamo_dtensor_from_local_dynamic_shapes
def test_dynamo_dtensor_from_local_dynamic_shapes(self):
    mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))

    # Case 1: all dims dynamic
    def fn(x):
        dt = DTensor.from_local(
            x,
            mesh,
            [Replicate()],
            run_check=False,
            shape=x.shape,
            stride=x.stride(),
        )
        return dt.to_local() + 2

    inp = torch.randn(4, 6, requires_grad=True)
    ref = fn(inp)
    cnt = torch._dynamo.testing.CompileCounterWithBackend("aot_eager")
    res = torch.compile(fn, backend=cnt, fullgraph=True, dynamic=True)(inp)
    res.sum().backward()

    self.assertEqual(res, ref)
    self.assertEqual(cnt.frame_count, 1)

    # Case 2: only sizes are dynamic, strides are static
    def fn(x):
        dt = DTensor.from_local(
            x, mesh, [Replicate()], run_check=False, shape=x.shape, stride=(1,)
        )
        return dt.to_local() + 2

    inp = torch.randn(4, requires_grad=True)
    torch._dynamo.mark_dynamic(inp, 0)
    ref = fn(inp)
    cnt = torch._dynamo.testing.CompileCounterWithBackend("aot_eager")
    res = torch.compile(fn, backend=cnt, fullgraph=True)(inp)
    res.sum().backward()

    self.assertEqual(res, ref)
    self.assertEqual(cnt.frame_count, 1)

    # Case 3: both sizes and strides have a mix of dynamic and static dims
    def fn(x):
        dt = DTensor.from_local(
            x,
            mesh,
            [Replicate()],
            run_check=False,
            shape=(x.shape[0], x.shape[1], 2),
            stride=(x.stride()[0], 2, 1),
        )
        return dt.to_local() + 2

    inp = torch.randn(4, 6, 2, requires_grad=True)
    torch._dynamo.mark_dynamic(inp, 0)
    torch._dynamo.mark_dynamic(inp, 1)
    ref = fn(inp)
    cnt = torch._dynamo.testing.CompileCounterWithBackend("aot_eager")
    res = torch.compile(fn, backend=cnt, fullgraph=True)(inp)
    res.sum().backward()

    self.assertEqual(res, ref)
    self.assertEqual(cnt.frame_count, 1)
import copy import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.testing import torch.distributed as dist import torch.nn as nn from torch._C import FileCheck from torch._inductor.utils import run_and_get_triton_code from torch.distributed._tensor import ( DeviceMesh, DTensor, init_device_mesh, Partial, Replicate, Shard, ) from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper, CheckpointImpl, ) from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, PrepareModuleInput, PrepareModuleOutput, RowwiseParallel, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, MLPModule, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.utils.checkpoint import checkpoint fw_graph_cell = [None] bw_graph_cell = [None] fw_compiler = functools.partial(extract_graph, graph_cell=fw_graph_cell) bw_compiler = functools.partial(extract_graph, graph_cell=bw_graph_cell) from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd aot_eager_graph = aot_autograd( fw_compiler=fw_compiler, bw_compiler=bw_compiler, partition_fn=min_cut_rematerialization_partition, ) class TestDTensorCompile(torch._dynamo.test_case.TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_dtensor_compile.py
setUp
def setUp(self):
    super().setUp()
    fake_store = FakeStore()
    dist.init_process_group(
        "fake", store=fake_store, rank=0, world_size=self.world_size
    )
import copy import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.testing import torch.distributed as dist import torch.nn as nn from torch._C import FileCheck from torch._inductor.utils import run_and_get_triton_code from torch.distributed._tensor import ( DeviceMesh, DTensor, init_device_mesh, Partial, Replicate, Shard, ) from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper, CheckpointImpl, ) from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, PrepareModuleInput, PrepareModuleOutput, RowwiseParallel, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, MLPModule, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.utils.checkpoint import checkpoint fw_graph_cell = [None] bw_graph_cell = [None] fw_compiler = functools.partial(extract_graph, graph_cell=fw_graph_cell) bw_compiler = functools.partial(extract_graph, graph_cell=bw_graph_cell) from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd aot_eager_graph = aot_autograd( fw_compiler=fw_compiler, bw_compiler=bw_compiler, partition_fn=min_cut_rematerialization_partition, ) class TestDTensorCompile(torch._dynamo.test_case.TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_dtensor_compile.py
fn
def fn(x):
    a = 0
    if x.is_replicate():
        a += 1
    if x.is_shard():
        a += 2
        if x.dim < 0:
            raise RuntimeError("dim < 0")
    if x.is_shard(0):
        a += 2
    if x.is_shard(dim=0):
        a += 2
    if x.is_shard(dim=None):
        a += 2
    if x.is_partial():
        a += 3
    return a

compiled_fn = torch.compile(backend="aot_eager", fullgraph=True)(fn)

for x in [Shard(0), Replicate(), Partial()]:
    opt_fn = fn(x)
    compiled_out = compiled_fn(x)
    self.assertEqual(opt_fn, compiled_out)
import copy import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.testing import torch.distributed as dist import torch.nn as nn from torch._C import FileCheck from torch._inductor.utils import run_and_get_triton_code from torch.distributed._tensor import ( DeviceMesh, DTensor, init_device_mesh, Partial, Replicate, Shard, ) from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper, CheckpointImpl, ) from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, PrepareModuleInput, PrepareModuleOutput, RowwiseParallel, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, MLPModule, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.utils.checkpoint import checkpoint fw_graph_cell = [None] bw_graph_cell = [None] fw_compiler = functools.partial(extract_graph, graph_cell=fw_graph_cell) bw_compiler = functools.partial(extract_graph, graph_cell=bw_graph_cell) from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd aot_eager_graph = aot_autograd( fw_compiler=fw_compiler, bw_compiler=bw_compiler, partition_fn=min_cut_rematerialization_partition, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_dtensor_compile.py
fn
def fn(x):
    a = 0
    if x.is_replicate():
        a += 1
    if x.is_shard():
        a += 2
        if x.dim < 0:
            raise RuntimeError("dim < 0")
    if x.is_shard(0):
        a += 2
    if x.is_shard(dim=0):
        a += 2
    if x.is_shard(dim=None):
        a += 2
    if x.is_partial():
        a += 3
    return a

compiled_fn = torch.compile(backend="aot_eager", fullgraph=True)(fn)

for x in [Shard(0), Replicate(), Partial()]:
    opt_fn = fn(x)
    compiled_out = compiled_fn(x)
    self.assertEqual(opt_fn, compiled_out)
import copy import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.testing import torch.distributed as dist import torch.nn as nn from torch._C import FileCheck from torch._inductor.utils import run_and_get_triton_code from torch.distributed._tensor import ( DeviceMesh, DTensor, init_device_mesh, Partial, Replicate, Shard, ) from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper, CheckpointImpl, ) from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, PrepareModuleInput, PrepareModuleOutput, RowwiseParallel, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, MLPModule, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.utils.checkpoint import checkpoint fw_graph_cell = [None] bw_graph_cell = [None] fw_compiler = functools.partial(extract_graph, graph_cell=fw_graph_cell) bw_compiler = functools.partial(extract_graph, graph_cell=bw_graph_cell) from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd aot_eager_graph = aot_autograd( fw_compiler=fw_compiler, bw_compiler=bw_compiler, partition_fn=min_cut_rematerialization_partition, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_dtensor_compile.py
test_fakify_dtensor
def test_fakify_dtensor(self):
    mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))

    # pass in DTensor as inputs/outputs to the function
    def fn(x):
        return x

    x = DTensor.from_local(torch.rand(1), mesh, [Shard(0)], run_check=False)
    ref = fn(x)

    opt_fn = torch.compile(fn, backend="aot_eager", fullgraph=True)
    res = opt_fn(x)
    self.assertEqual(res, ref)
import copy import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.testing import torch.distributed as dist import torch.nn as nn from torch._C import FileCheck from torch._inductor.utils import run_and_get_triton_code from torch.distributed._tensor import ( DeviceMesh, DTensor, init_device_mesh, Partial, Replicate, Shard, ) from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper, CheckpointImpl, ) from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, PrepareModuleInput, PrepareModuleOutput, RowwiseParallel, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, MLPModule, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.utils.checkpoint import checkpoint fw_graph_cell = [None] bw_graph_cell = [None] fw_compiler = functools.partial(extract_graph, graph_cell=fw_graph_cell) bw_compiler = functools.partial(extract_graph, graph_cell=bw_graph_cell) from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd aot_eager_graph = aot_autograd( fw_compiler=fw_compiler, bw_compiler=bw_compiler, partition_fn=min_cut_rematerialization_partition, ) class TestDTensorCompile(torch._dynamo.test_case.TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_dtensor_compile.py
fn
def fn(x):
    a = 0
    if x.is_replicate():
        a += 1
    if x.is_shard():
        a += 2
        if x.dim < 0:
            raise RuntimeError("dim < 0")
    if x.is_shard(0):
        a += 2
    if x.is_shard(dim=0):
        a += 2
    if x.is_shard(dim=None):
        a += 2
    if x.is_partial():
        a += 3
    return a

compiled_fn = torch.compile(backend="aot_eager", fullgraph=True)(fn)

for x in [Shard(0), Replicate(), Partial()]:
    opt_fn = fn(x)
    compiled_out = compiled_fn(x)
    self.assertEqual(opt_fn, compiled_out)
import copy import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.testing import torch.distributed as dist import torch.nn as nn from torch._C import FileCheck from torch._inductor.utils import run_and_get_triton_code from torch.distributed._tensor import ( DeviceMesh, DTensor, init_device_mesh, Partial, Replicate, Shard, ) from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper, CheckpointImpl, ) from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, PrepareModuleInput, PrepareModuleOutput, RowwiseParallel, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, MLPModule, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.utils.checkpoint import checkpoint fw_graph_cell = [None] bw_graph_cell = [None] fw_compiler = functools.partial(extract_graph, graph_cell=fw_graph_cell) bw_compiler = functools.partial(extract_graph, graph_cell=bw_graph_cell) from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd aot_eager_graph = aot_autograd( fw_compiler=fw_compiler, bw_compiler=bw_compiler, partition_fn=min_cut_rematerialization_partition, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_dtensor_compile.py
test_dynamo_dtensor
def test_dynamo_dtensor(self):
    mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))

    # test passing in DTensor as inputs/outputs and run some tensor computation
    def fn(x):
        return x * x + 2

    x = DTensor.from_local(torch.rand(1), mesh, [Shard(0)], run_check=False)
    ref = fn(x)

    opt_fn = torch.compile(fn, backend="aot_eager", fullgraph=True)
    res = opt_fn(x)
    self.assertEqual(res, ref)
import copy import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.testing import torch.distributed as dist import torch.nn as nn from torch._C import FileCheck from torch._inductor.utils import run_and_get_triton_code from torch.distributed._tensor import ( DeviceMesh, DTensor, init_device_mesh, Partial, Replicate, Shard, ) from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper, CheckpointImpl, ) from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, PrepareModuleInput, PrepareModuleOutput, RowwiseParallel, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, MLPModule, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.utils.checkpoint import checkpoint fw_graph_cell = [None] bw_graph_cell = [None] fw_compiler = functools.partial(extract_graph, graph_cell=fw_graph_cell) bw_compiler = functools.partial(extract_graph, graph_cell=bw_graph_cell) from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd aot_eager_graph = aot_autograd( fw_compiler=fw_compiler, bw_compiler=bw_compiler, partition_fn=min_cut_rematerialization_partition, ) class TestDTensorCompile(torch._dynamo.test_case.TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_dtensor_compile.py
fn
def fn(x):
    a = 0
    if x.is_replicate():
        a += 1
    if x.is_shard():
        a += 2
        if x.dim < 0:
            raise RuntimeError("dim < 0")
    if x.is_shard(0):
        a += 2
    if x.is_shard(dim=0):
        a += 2
    if x.is_shard(dim=None):
        a += 2
    if x.is_partial():
        a += 3
    return a

compiled_fn = torch.compile(backend="aot_eager", fullgraph=True)(fn)

for x in [Shard(0), Replicate(), Partial()]:
    opt_fn = fn(x)
    compiled_out = compiled_fn(x)
    self.assertEqual(opt_fn, compiled_out)
import copy import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.testing import torch.distributed as dist import torch.nn as nn from torch._C import FileCheck from torch._inductor.utils import run_and_get_triton_code from torch.distributed._tensor import ( DeviceMesh, DTensor, init_device_mesh, Partial, Replicate, Shard, ) from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper, CheckpointImpl, ) from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, PrepareModuleInput, PrepareModuleOutput, RowwiseParallel, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, MLPModule, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.utils.checkpoint import checkpoint fw_graph_cell = [None] bw_graph_cell = [None] fw_compiler = functools.partial(extract_graph, graph_cell=fw_graph_cell) bw_compiler = functools.partial(extract_graph, graph_cell=bw_graph_cell) from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd aot_eager_graph = aot_autograd( fw_compiler=fw_compiler, bw_compiler=bw_compiler, partition_fn=min_cut_rematerialization_partition, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_dtensor_compile.py
test_dtensor_attribute_access_on_intermediate
def test_dtensor_attribute_access_on_intermediate(self):
    mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))

    def fn(x):
        tmp = x * 2
        if tmp.placements[0].is_shard():
            return tmp._local_tensor + 2
        else:
            return tmp._local_tensor + 3

    x = DTensor.from_local(torch.ones(4), mesh, [Shard(0)], run_check=False)
    ref = fn(x)

    opt_fn = torch.compile(fn, backend="aot_eager", fullgraph=True)
    res = opt_fn(x)
    self.assertEqual(res, ref)
import copy import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.testing import torch.distributed as dist import torch.nn as nn from torch._C import FileCheck from torch._inductor.utils import run_and_get_triton_code from torch.distributed._tensor import ( DeviceMesh, DTensor, init_device_mesh, Partial, Replicate, Shard, ) from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper, CheckpointImpl, ) from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, PrepareModuleInput, PrepareModuleOutput, RowwiseParallel, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, MLPModule, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.utils.checkpoint import checkpoint fw_graph_cell = [None] bw_graph_cell = [None] fw_compiler = functools.partial(extract_graph, graph_cell=fw_graph_cell) bw_compiler = functools.partial(extract_graph, graph_cell=bw_graph_cell) from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd aot_eager_graph = aot_autograd( fw_compiler=fw_compiler, bw_compiler=bw_compiler, partition_fn=min_cut_rematerialization_partition, ) class TestDTensorCompile(torch._dynamo.test_case.TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_dtensor_compile.py
fn
def fn(x):
    a = 0
    if x.is_replicate():
        a += 1
    if x.is_shard():
        a += 2
        if x.dim < 0:
            raise RuntimeError("dim < 0")
    if x.is_shard(0):
        a += 2
    if x.is_shard(dim=0):
        a += 2
    if x.is_shard(dim=None):
        a += 2
    if x.is_partial():
        a += 3
    return a

compiled_fn = torch.compile(backend="aot_eager", fullgraph=True)(fn)

for x in [Shard(0), Replicate(), Partial()]:
    opt_fn = fn(x)
    compiled_out = compiled_fn(x)
    self.assertEqual(opt_fn, compiled_out)
import copy import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.testing import torch.distributed as dist import torch.nn as nn from torch._C import FileCheck from torch._inductor.utils import run_and_get_triton_code from torch.distributed._tensor import ( DeviceMesh, DTensor, init_device_mesh, Partial, Replicate, Shard, ) from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper, CheckpointImpl, ) from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, PrepareModuleInput, PrepareModuleOutput, RowwiseParallel, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, MLPModule, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.utils.checkpoint import checkpoint fw_graph_cell = [None] bw_graph_cell = [None] fw_compiler = functools.partial(extract_graph, graph_cell=fw_graph_cell) bw_compiler = functools.partial(extract_graph, graph_cell=bw_graph_cell) from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd aot_eager_graph = aot_autograd( fw_compiler=fw_compiler, bw_compiler=bw_compiler, partition_fn=min_cut_rematerialization_partition, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_dtensor_compile.py
fn
def fn(x):
    a = 0
    if x.is_replicate():
        a += 1
    if x.is_shard():
        a += 2
        if x.dim < 0:
            raise RuntimeError("dim < 0")
    if x.is_shard(0):
        a += 2
    if x.is_shard(dim=0):
        a += 2
    if x.is_shard(dim=None):
        a += 2
    if x.is_partial():
        a += 3
    return a

compiled_fn = torch.compile(backend="aot_eager", fullgraph=True)(fn)

for x in [Shard(0), Replicate(), Partial()]:
    opt_fn = fn(x)
    compiled_out = compiled_fn(x)
    self.assertEqual(opt_fn, compiled_out)
import copy import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.testing import torch.distributed as dist import torch.nn as nn from torch._C import FileCheck from torch._inductor.utils import run_and_get_triton_code from torch.distributed._tensor import ( DeviceMesh, DTensor, init_device_mesh, Partial, Replicate, Shard, ) from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper, CheckpointImpl, ) from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, PrepareModuleInput, PrepareModuleOutput, RowwiseParallel, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, MLPModule, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.utils.checkpoint import checkpoint fw_graph_cell = [None] bw_graph_cell = [None] fw_compiler = functools.partial(extract_graph, graph_cell=fw_graph_cell) bw_compiler = functools.partial(extract_graph, graph_cell=bw_graph_cell) from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd aot_eager_graph = aot_autograd( fw_compiler=fw_compiler, bw_compiler=bw_compiler, partition_fn=min_cut_rematerialization_partition, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_dtensor_compile.py
test_dynamo_dtensor_recompile
def test_dynamo_dtensor_recompile(self):
    mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))

    # test passing in DTensor as inputs/outputs and run some tensor computation
    def fn(x):
        return torch.mul(x, x)

    x = DTensor.from_local(torch.rand(2, 2), mesh, [Shard(0)], run_check=False)
    x2 = DTensor.from_local(torch.rand(2, 2), mesh, [Shard(0)], run_check=False)
    x3 = DTensor.from_local(torch.rand(2, 2), mesh, [Shard(1)], run_check=False)

    cnt = torch._dynamo.testing.CompileCounter()
    opt_fn = torch.compile(fn, backend=cnt, fullgraph=True, dynamic=False)
    self.assertEqual(fn(x), opt_fn(x))
    self.assertEqual(cnt.frame_count, 1)
    self.assertEqual(fn(x2), opt_fn(x2))
    self.assertEqual(cnt.frame_count, 1)
    self.assertEqual(fn(x3), opt_fn(x3))
    self.assertEqual(cnt.frame_count, 2)
import copy import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.testing import torch.distributed as dist import torch.nn as nn from torch._C import FileCheck from torch._inductor.utils import run_and_get_triton_code from torch.distributed._tensor import ( DeviceMesh, DTensor, init_device_mesh, Partial, Replicate, Shard, ) from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper, CheckpointImpl, ) from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, PrepareModuleInput, PrepareModuleOutput, RowwiseParallel, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, MLPModule, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.utils.checkpoint import checkpoint fw_graph_cell = [None] bw_graph_cell = [None] fw_compiler = functools.partial(extract_graph, graph_cell=fw_graph_cell) bw_compiler = functools.partial(extract_graph, graph_cell=bw_graph_cell) from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd aot_eager_graph = aot_autograd( fw_compiler=fw_compiler, bw_compiler=bw_compiler, partition_fn=min_cut_rematerialization_partition, ) class TestDTensorCompile(torch._dynamo.test_case.TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
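Not part of the dataset row above: a minimal standalone sketch of how torch._dynamo.testing.CompileCounter counts compiled frames, using plain tensors instead of DTensors. The shapes and the mul function are illustrative assumptions, not taken from the test.

import torch
import torch._dynamo.testing

cnt = torch._dynamo.testing.CompileCounter()

def f(x):
    return torch.mul(x, x)

# dynamic=False specializes on input shapes, so a new shape forces a recompile
opt_f = torch.compile(f, backend=cnt, fullgraph=True, dynamic=False)

opt_f(torch.rand(2, 2))   # first call: compiles one frame
opt_f(torch.rand(2, 2))   # same metadata: guards hit, no recompile
print(cnt.frame_count)    # expected: 1
opt_f(torch.rand(2, 3))   # new shape: recompile
print(cnt.frame_count)    # expected: 2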
torch
test/distributed/_tensor/test_dtensor_compile.py
test_dtensor_contiguous_dtensor_noncontiguous_local_as_tangent
def test_dtensor_contiguous_dtensor_noncontiguous_local_as_tangent(self):
    # Partial -> Shard on an unbalanced tensor results in:
    # - A contiguous DTensor
    # - where the inner _local_tensor is noncontiguous
    # When this tensor is a fwd graph output,
    # AOTAutograd needs to make sure we trace the backward
    # with a contiguous tangent
    placement = Shard(1)

    def fn(x):
        out = x.redistribute(mesh, [placement])
        return out

    # Temporarily ignore setUp(), and use rank3 graphs during tracing
    dist.destroy_process_group()
    fake_store = FakeStore()
    dist.init_process_group("fake", store=fake_store, rank=3, world_size=2)
    mesh = DeviceMesh(self.device_type, [1, 3])

    x = torch.randn(10, 257, 160, requires_grad=True)
    x_dt = DTensor.from_local(
        x,
        mesh,
        [Partial()],
        run_check=False,
        shape=(10, 257, 160),
        stride=(41120, 160, 1),
    )

    out_dt = torch.compile(fn)(x_dt)
    # If we don't properly contiguify our traced tangents,
    # this fails with an inductor stride assert
    out_dt.to_local().sum().backward()
import copy import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.testing import torch.distributed as dist import torch.nn as nn from torch._C import FileCheck from torch._inductor.utils import run_and_get_triton_code from torch.distributed._tensor import ( DeviceMesh, DTensor, init_device_mesh, Partial, Replicate, Shard, ) from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper, CheckpointImpl, ) from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, PrepareModuleInput, PrepareModuleOutput, RowwiseParallel, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, MLPModule, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.utils.checkpoint import checkpoint fw_graph_cell = [None] bw_graph_cell = [None] fw_compiler = functools.partial(extract_graph, graph_cell=fw_graph_cell) bw_compiler = functools.partial(extract_graph, graph_cell=bw_graph_cell) from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd aot_eager_graph = aot_autograd( fw_compiler=fw_compiler, bw_compiler=bw_compiler, partition_fn=min_cut_rematerialization_partition, ) class TestDTensorCompile(torch._dynamo.test_case.TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
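For context only, not from the row above: a plain-tensor sketch of the contiguity issue the test guards against, namely a tensor whose storage layout is noncontiguous even though the logical shape looks ordinary. The shapes are illustrative.

import torch

x = torch.randn(10, 257, 160)
y = x.transpose(1, 2)                   # a view with permuted strides
print(y.is_contiguous())                # False: strides are not dense row-major
z = y.contiguous()                      # materializes a densely-strided copy
print(z.is_contiguous())                # True
print(y.stride(), z.stride())           # (41120, 1, 160) vs (41120, 257, 1)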
torch
test/distributed/_tensor/test_dtensor_compile.py
test_dynamo_to_local_kwargs
def test_dynamo_to_local_kwargs(self):
    mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))

    def fn(x):
        return dt.to_local(grad_placements=[Shard(0)]) + 2

    fn_opt = torch.compile(fn, backend="aot_eager", fullgraph=True)
    x = torch.ones(4)
    dt = DTensor.from_local(x, mesh, [Replicate()], run_check=False)

    out_ref = fn(dt)
    out_test = fn_opt(dt)
    self.assertEqual(out_ref, out_test)
import copy import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.testing import torch.distributed as dist import torch.nn as nn from torch._C import FileCheck from torch._inductor.utils import run_and_get_triton_code from torch.distributed._tensor import ( DeviceMesh, DTensor, init_device_mesh, Partial, Replicate, Shard, ) from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper, CheckpointImpl, ) from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, PrepareModuleInput, PrepareModuleOutput, RowwiseParallel, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, MLPModule, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.utils.checkpoint import checkpoint fw_graph_cell = [None] bw_graph_cell = [None] fw_compiler = functools.partial(extract_graph, graph_cell=fw_graph_cell) bw_compiler = functools.partial(extract_graph, graph_cell=bw_graph_cell) from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd aot_eager_graph = aot_autograd( fw_compiler=fw_compiler, bw_compiler=bw_compiler, partition_fn=min_cut_rematerialization_partition, ) class TestDTensorCompile(torch._dynamo.test_case.TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_dtensor_compile.py
fw_hook
def fw_hook(module, inp, out):
    tmp = out.to_local(grad_placements=out.placements) + 2
    return DTensor.from_local(tmp, mesh, out.placements, run_check=False)

mod = torch.nn.Linear(4, 4)
mod.register_forward_hook(fw_hook)

mod = torch.nn.Linear(4, 4)
mod.register_forward_hook(fw_hook)
mod.weight = torch.nn.Parameter(
    DTensor.from_local(mod.weight, mesh, [Replicate()], run_check=False)
)
mod.bias = torch.nn.Parameter(
    DTensor.from_local(mod.bias, mesh, [Replicate()], run_check=False)
)
opt_mod = torch.compile(mod, backend="aot_eager", fullgraph=True)

x = torch.ones(4, 4)
dt = DTensor.from_local(x, mesh, [Replicate()], run_check=False)

out_ref = mod(dt)
out_test = opt_mod(dt)
self.assertEqual(out_ref, out_test)
import copy import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.testing import torch.distributed as dist import torch.nn as nn from torch._C import FileCheck from torch._inductor.utils import run_and_get_triton_code from torch.distributed._tensor import ( DeviceMesh, DTensor, init_device_mesh, Partial, Replicate, Shard, ) from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper, CheckpointImpl, ) from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, PrepareModuleInput, PrepareModuleOutput, RowwiseParallel, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, MLPModule, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.utils.checkpoint import checkpoint fw_graph_cell = [None] bw_graph_cell = [None] fw_compiler = functools.partial(extract_graph, graph_cell=fw_graph_cell) bw_compiler = functools.partial(extract_graph, graph_cell=bw_graph_cell) from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd aot_eager_graph = aot_autograd( fw_compiler=fw_compiler, bw_compiler=bw_compiler, partition_fn=min_cut_rematerialization_partition, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_dtensor_compile.py
test_dtensor_different_gradient_placement
def test_dtensor_different_gradient_placement(self):
    mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))

    def fn(x, y, z):
        permute = x.permute(0, 2, 1)
        permute2 = permute.contiguous()
        layer_norm = torch.nn.functional.layer_norm(permute2, (4,), y, z, 1e-05)
        out = layer_norm.permute(0, 2, 1)
        return out

    x = torch.randn(4, 2, 4, requires_grad=True, device="cuda")
    x_dt = DTensor.from_local(x, mesh, [Shard(1)], run_check=False)

    y = torch.randn(4, requires_grad=True, device="cuda")
    y_dt = DTensor.from_local(y, mesh, [Replicate()], run_check=False)

    z = torch.randn(4, requires_grad=True, device="cuda")
    z_dt = DTensor.from_local(z, mesh, [Replicate()], run_check=False)

    opt_fn = torch.compile(fn, backend="inductor", fullgraph=True)
    tmp_dt = opt_fn(x_dt, y_dt, z_dt)
    out_dt = torch.matmul(tmp_dt, x_dt).permute(0, 2, 1)
    out_dt.sum().backward()
import copy import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.testing import torch.distributed as dist import torch.nn as nn from torch._C import FileCheck from torch._inductor.utils import run_and_get_triton_code from torch.distributed._tensor import ( DeviceMesh, DTensor, init_device_mesh, Partial, Replicate, Shard, ) from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper, CheckpointImpl, ) from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, PrepareModuleInput, PrepareModuleOutput, RowwiseParallel, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, MLPModule, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.utils.checkpoint import checkpoint fw_graph_cell = [None] bw_graph_cell = [None] fw_compiler = functools.partial(extract_graph, graph_cell=fw_graph_cell) bw_compiler = functools.partial(extract_graph, graph_cell=bw_graph_cell) from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd aot_eager_graph = aot_autograd( fw_compiler=fw_compiler, bw_compiler=bw_compiler, partition_fn=min_cut_rematerialization_partition, ) class TestDTensorCompile(torch._dynamo.test_case.TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_dtensor_compile.py
test_dynamo_dtensor_from_local_redistribute
def test_dynamo_dtensor_from_local_redistribute(self):
    mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))

    # pass in tensor as inputs/outputs, create DTensor and run redistribute
    # (allgather collective) inside the fn
    def fn(x):
        dt = DTensor.from_local(x, mesh, [Shard(0)], run_check=False)
        return dt.redistribute(mesh, [Replicate()]).to_local() + 2

    x = torch.ones(1)
    ref = fn(x)
    cnt = torch._dynamo.testing.CompileCounterWithBackend("aot_eager")
    opt_fn = torch.compile(fn, backend=cnt, fullgraph=True)
    res = opt_fn(x)
    self.assertEqual(res, ref)

    def redistribute_kwargs_fn(x):
        dt = DTensor.from_local(x, mesh, [Shard(0)], run_check=False)
        return (
            dt.redistribute(device_mesh=mesh, placements=[Replicate()]).to_local()
            + 2
        )

    x = torch.ones(1)
    ref = redistribute_kwargs_fn(x)
    opt_kwargs_fn = torch.compile(
        redistribute_kwargs_fn, backend=cnt, fullgraph=True
    )
    res = opt_kwargs_fn(x)
    self.assertEqual(res, ref)
import copy import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.testing import torch.distributed as dist import torch.nn as nn from torch._C import FileCheck from torch._inductor.utils import run_and_get_triton_code from torch.distributed._tensor import ( DeviceMesh, DTensor, init_device_mesh, Partial, Replicate, Shard, ) from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper, CheckpointImpl, ) from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, PrepareModuleInput, PrepareModuleOutput, RowwiseParallel, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, MLPModule, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.utils.checkpoint import checkpoint fw_graph_cell = [None] bw_graph_cell = [None] fw_compiler = functools.partial(extract_graph, graph_cell=fw_graph_cell) bw_compiler = functools.partial(extract_graph, graph_cell=bw_graph_cell) from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd aot_eager_graph = aot_autograd( fw_compiler=fw_compiler, bw_compiler=bw_compiler, partition_fn=min_cut_rematerialization_partition, ) class TestDTensorCompile(torch._dynamo.test_case.TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_dtensor_compile.py
redistribute_kwargs_fn
def redistribute_kwargs_fn(x):
    dt = DTensor.from_local(x, mesh, [Shard(0)], run_check=False)
    return (
        dt.redistribute(device_mesh=mesh, placements=[Replicate()]).to_local()
        + 2
    )

x = torch.ones(1)
ref = redistribute_kwargs_fn(x)
opt_kwargs_fn = torch.compile(
    redistribute_kwargs_fn, backend=cnt, fullgraph=True
)
res = opt_kwargs_fn(x)
self.assertEqual(res, ref)
import copy import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.testing import torch.distributed as dist import torch.nn as nn from torch._C import FileCheck from torch._inductor.utils import run_and_get_triton_code from torch.distributed._tensor import ( DeviceMesh, DTensor, init_device_mesh, Partial, Replicate, Shard, ) from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper, CheckpointImpl, ) from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, PrepareModuleInput, PrepareModuleOutput, RowwiseParallel, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, MLPModule, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.utils.checkpoint import checkpoint fw_graph_cell = [None] bw_graph_cell = [None] fw_compiler = functools.partial(extract_graph, graph_cell=fw_graph_cell) bw_compiler = functools.partial(extract_graph, graph_cell=bw_graph_cell) from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd aot_eager_graph = aot_autograd( fw_compiler=fw_compiler, bw_compiler=bw_compiler, partition_fn=min_cut_rematerialization_partition, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_dtensor_compile.py
test_dtensor_dont_recompile_on_same_placement_devicemesh
def test_dtensor_dont_recompile_on_same_placement_devicemesh(self):
    cnt = torch._dynamo.testing.CompileCounterWithBackend("inductor")

    @torch.compile(backend=cnt)
    def fn(x):
        dt = DTensor.from_local(x, mesh, [placement], run_check=False)

    x = torch.ones(4, 4, requires_grad=True)

    mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
    placement = Shard(1)
    fn(x)

    mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
    placement = Shard(1)
    # no recompile, placement is unchanged
    fn(x)

    mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
    placement = Partial()
    # recompile since placement is different
    fn(x)

    mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
    placement = Partial()
    # no recompile, placement is unchanged
    fn(x)

    # 2 total frames (one for Partial(), one for Shard())
    self.assertEqual(cnt.frame_count, 2)
import copy import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.testing import torch.distributed as dist import torch.nn as nn from torch._C import FileCheck from torch._inductor.utils import run_and_get_triton_code from torch.distributed._tensor import ( DeviceMesh, DTensor, init_device_mesh, Partial, Replicate, Shard, ) from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper, CheckpointImpl, ) from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, PrepareModuleInput, PrepareModuleOutput, RowwiseParallel, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, MLPModule, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.utils.checkpoint import checkpoint fw_graph_cell = [None] bw_graph_cell = [None] fw_compiler = functools.partial(extract_graph, graph_cell=fw_graph_cell) bw_compiler = functools.partial(extract_graph, graph_cell=bw_graph_cell) from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd aot_eager_graph = aot_autograd( fw_compiler=fw_compiler, bw_compiler=bw_compiler, partition_fn=min_cut_rematerialization_partition, ) class TestDTensorCompile(torch._dynamo.test_case.TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
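An aside, not from the row above: a sketch (plain tensors, made-up values) of the guard behavior the test relies on, where a compiled function recompiles only when a captured global actually changes. Exact guard behavior can vary across torch versions.

import torch
import torch._dynamo.testing

cnt = torch._dynamo.testing.CompileCounterWithBackend("eager")

scale = 2.0  # global captured by the compiled function

@torch.compile(backend=cnt)
def f(x):
    return x * scale

x = torch.ones(4)
f(x)          # compiles frame 1
scale = 2.0   # rebound to an equal value: guard still passes, no recompile
f(x)
scale = 3.0   # different value: guard fails, recompiles
f(x)
print(cnt.frame_count)  # expected: 2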
torch
test/distributed/_tensor/test_dtensor.py
add_scalar_tensor_with_dtensor
def add_scalar_tensor_with_dtensor():
    return ndim_0_tensor + sharded_dtensor

result = add_scalar_tensor_with_dtensor().to_local()
self.assertEqual(result, local_tensor + ndim_0_tensor)
self.assertNotWarn(
    add_scalar_tensor_with_dtensor,
    "Found a non-scalar tensor with numel=1 and ndim!=0",
)

# automatically turn tensor to DTensor replicate when ndim = 1 and numel = 1
numel_1_tensor = torch.tensor([1], device=self.device_type)
self.assertEqual(
    (numel_1_tensor + sharded_dtensor).to_local(), numel_1_tensor + local_tensor
)
import os from numpy.testing import assert_array_equal import torch import torch.nn.functional as F from torch.distributed._functional_collectives import AsyncCollectiveTensor from torch.distributed._tensor import ( DeviceMesh, distribute_tensor, DTensor, init_device_mesh, ) from torch.distributed._tensor.experimental import implicit_replication from torch.distributed._tensor.placement_types import ( DTensorSpec, Partial, Replicate, Shard, TensorMeta, ) from torch.distributed.tensor.debug import CommDebugMode from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, RowwiseParallel, ) from torch.serialization import safe_globals from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, with_comms, ) from torch.testing._internal.logging_utils import LoggingTestCase c10d_functional = torch.ops.c10d_functional import io from torch.distributed._tensor._utils import ( compute_local_shape_and_global_offset, ) from torch.distributed.tensor._collective_utils import unpad_tensor
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_dtensor.py
test_metadata_consistency_check
def test_metadata_consistency_check(self):
    device_mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
    placements = [Shard(0)]

    # Create a local tensor with specific metadata and check dtype change
    local_tensor = torch.randn(3, 3, requires_grad=True, dtype=torch.float32)
    if self.rank == 0:
        local_tensor = local_tensor.to(dtype=torch.float64)
    with self.assertRaises(ValueError):
        DTensor.from_local(local_tensor, device_mesh, placements, run_check=True)
    try:
        DTensor.from_local(local_tensor, device_mesh, placements, run_check=False)
    except ValueError:
        self.fail("Unexpected ValueError raised with run_check=False")

    # Create a local tensor with specific metadata and check requires_grad change
    local_tensor = torch.randn(3, 3, requires_grad=True, dtype=torch.float32)
    if self.rank == 0:
        local_tensor.requires_grad = False
    with self.assertRaises(ValueError):
        DTensor.from_local(local_tensor, device_mesh, placements, run_check=True)
    try:
        DTensor.from_local(local_tensor, device_mesh, placements, run_check=False)
    except ValueError:
        self.fail("Unexpected ValueError raised with run_check=False")

    # Create a local tensor with specific metadata and check stride change
    local_tensor = torch.randn(3, 4, requires_grad=True, dtype=torch.float32)
    if self.rank == 0:
        local_tensor = local_tensor.t()  # transpose changes the stride
    with self.assertRaises(ValueError):
        DTensor.from_local(local_tensor, device_mesh, placements, run_check=True)
    try:
        DTensor.from_local(local_tensor, device_mesh, placements, run_check=False)
    except ValueError:
        self.fail("Unexpected ValueError raised with run_check=False")
import os from numpy.testing import assert_array_equal import torch import torch.nn.functional as F from torch.distributed._functional_collectives import AsyncCollectiveTensor from torch.distributed._tensor import ( DeviceMesh, distribute_tensor, DTensor, init_device_mesh, ) from torch.distributed._tensor.experimental import implicit_replication from torch.distributed._tensor.placement_types import ( DTensorSpec, Partial, Replicate, Shard, TensorMeta, ) from torch.distributed.tensor.debug import CommDebugMode from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, RowwiseParallel, ) from torch.serialization import safe_globals from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, with_comms, ) from torch.testing._internal.logging_utils import LoggingTestCase c10d_functional = torch.ops.c10d_functional import io class DTensorMeshTest(DTensorTestBase): from torch.distributed._tensor._utils import ( compute_local_shape_and_global_offset, ) from torch.distributed.tensor._collective_utils import unpad_tensor
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
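Not from the row above: a toy illustration of the per-rank metadata (dtype, stride, requires_grad) that run_check=True is expected to compare. Both comparison tensors are invented for the example.

import torch

a = torch.randn(3, 3, dtype=torch.float32, requires_grad=True)
b = torch.randn(3, 3, dtype=torch.float64)          # dtype differs
c = torch.randn(3, 4, dtype=torch.float32).t()      # stride differs

def same_metadata(t1, t2):
    # the kind of consistency check the test exercises across ranks
    return (
        t1.dtype == t2.dtype
        and t1.stride() == t2.stride()
        and t1.requires_grad == t2.requires_grad
    )

print(same_metadata(a, b))  # False
print(same_metadata(a, c))  # False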
torch
test/distributed/_tensor/test_dtensor_compile.py
__init__
def __init__(self, device):
    super().__init__()
    self.mlp_0 = MLPModule(device)
    self.mlp_1 = MLPModule(device)
import copy import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.testing import torch.distributed as dist import torch.nn as nn from torch._C import FileCheck from torch._inductor.utils import run_and_get_triton_code from torch.distributed._tensor import ( DeviceMesh, DTensor, init_device_mesh, Partial, Replicate, Shard, ) from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper, CheckpointImpl, ) from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, PrepareModuleInput, PrepareModuleOutput, RowwiseParallel, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, MLPModule, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.utils.checkpoint import checkpoint class SimpleModel(nn.Module): from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_dtensor_compile.py
forward
def forward(self, input):
    return self.mlp_1(self.mlp_0(input))
import copy import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.testing import torch.distributed as dist import torch.nn as nn from torch._C import FileCheck from torch._inductor.utils import run_and_get_triton_code from torch.distributed._tensor import ( DeviceMesh, DTensor, init_device_mesh, Partial, Replicate, Shard, ) from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper, CheckpointImpl, ) from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, PrepareModuleInput, PrepareModuleOutput, RowwiseParallel, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, MLPModule, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.utils.checkpoint import checkpoint class SimpleModel(nn.Module): from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_dtensor_compile.py
test_dtensor_dynamo_device_mesh_attrs
def test_dtensor_dynamo_device_mesh_attrs(self):
    mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))

    # pass in tensor as inputs/outputs, create DTensor and run redistribute
    # (allgather collective) inside the fn
    def fn(x_dt):
        if x_dt.device_mesh.device_type == "cuda":
            return x_dt + 1
        else:
            return x_dt + 2

    x = torch.ones(4, 4)
    x_dt = DTensor.from_local(x, mesh, [Shard(0)], run_check=False)
    ref = fn(x_dt)

    opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
    res = opt_fn(x_dt)
    self.assertEqual(ref, res)
import copy import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.testing import torch.distributed as dist import torch.nn as nn from torch._C import FileCheck from torch._inductor.utils import run_and_get_triton_code from torch.distributed._tensor import ( DeviceMesh, DTensor, init_device_mesh, Partial, Replicate, Shard, ) from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper, CheckpointImpl, ) from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, PrepareModuleInput, PrepareModuleOutput, RowwiseParallel, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, MLPModule, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.utils.checkpoint import checkpoint fw_graph_cell = [None] bw_graph_cell = [None] fw_compiler = functools.partial(extract_graph, graph_cell=fw_graph_cell) bw_compiler = functools.partial(extract_graph, graph_cell=bw_graph_cell) from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd aot_eager_graph = aot_autograd( fw_compiler=fw_compiler, bw_compiler=bw_compiler, partition_fn=min_cut_rematerialization_partition, ) class TestDTensorCompile(torch._dynamo.test_case.TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_dtensor_compile.py
fn
def fn(x):
    a = 0
    if x.is_replicate():
        a += 1
    if x.is_shard():
        a += 2
        if x.dim < 0:
            raise RuntimeError("dim < 0")
    if x.is_shard(0):
        a += 2
    if x.is_shard(dim=0):
        a += 2
    if x.is_shard(dim=None):
        a += 2
    if x.is_partial():
        a += 3
    return a

compiled_fn = torch.compile(backend="aot_eager", fullgraph=True)(fn)
for x in [Shard(0), Replicate(), Partial()]:
    opt_fn = fn(x)
    compiled_out = compiled_fn(x)
    self.assertEqual(opt_fn, compiled_out)
import copy import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.testing import torch.distributed as dist import torch.nn as nn from torch._C import FileCheck from torch._inductor.utils import run_and_get_triton_code from torch.distributed._tensor import ( DeviceMesh, DTensor, init_device_mesh, Partial, Replicate, Shard, ) from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper, CheckpointImpl, ) from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, PrepareModuleInput, PrepareModuleOutput, RowwiseParallel, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, MLPModule, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.utils.checkpoint import checkpoint fw_graph_cell = [None] bw_graph_cell = [None] fw_compiler = functools.partial(extract_graph, graph_cell=fw_graph_cell) bw_compiler = functools.partial(extract_graph, graph_cell=bw_graph_cell) from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd aot_eager_graph = aot_autograd( fw_compiler=fw_compiler, bw_compiler=bw_compiler, partition_fn=min_cut_rematerialization_partition, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_dtensor_compile.py
test_tp_compile_comm_reordering
def test_tp_compile_comm_reordering(self): class FakeAttention(nn.Module): def __init__(self) -> None: super().__init__() self.wq = nn.Linear(16, 16) self.wk = nn.Linear(16, 16) self.wv = nn.Linear(16, 16) self.wo = nn.Linear(16, 16) def forward(self, x): xq = self.wq(x) xk = self.wk(x) xv = self.wv(x) # fake attention: xo = xq + xk + xv return self.wo(xo) class FakeTransformerBlock(nn.Module): def __init__(self) -> None: super().__init__() self.attn = FakeAttention() def forward(self, x): return self.attn(x) class FakeTransformer(nn.Module): def __init__(self) -> None: super().__init__() self.block = FakeTransformerBlock() def forward(self, input): return self.block(input) model = FakeTransformer().to(self.device_type) tp_mesh = init_device_mesh("cuda", (2,), mesh_dim_names=("tp",)) # apply sequence parallel parallel_plan = { "attn": PrepareModuleInput( input_layouts=Shard(0), desired_input_layouts=Replicate() ), "attn.wq": ColwiseParallel(), "attn.wk": ColwiseParallel(), "attn.wv": ColwiseParallel(), "attn.wo": RowwiseParallel(output_layouts=Shard(0)), } parallelize_module( module=model.block, device_mesh=tp_mesh, parallelize_plan=parallel_plan, ) cnt = torch._dynamo.testing.CompileCounterWithBackend("inductor") compiled_model = torch.compile(model, backend=cnt, fullgraph=True) inp = torch.rand(20, 16).to(self.device_type) out = compiled_model(inp) out.sum().backward() self.assertEqual(cnt.frame_count, 1) code = run_and_get_triton_code(compiled_model, inp) FileCheck().check( "buf0 = torch.ops._c10d_functional.all_gather_into_tensor.default(primal" ).check("torch.ops._c10d_functional.wait_tensor.default(buf0").check( "extern_kernels.mm(buf0," ).run( code )
import copy import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.testing import torch.distributed as dist import torch.nn as nn from torch._C import FileCheck from torch._inductor.utils import run_and_get_triton_code from torch.distributed._tensor import ( DeviceMesh, DTensor, init_device_mesh, Partial, Replicate, Shard, ) from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper, CheckpointImpl, ) from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, PrepareModuleInput, PrepareModuleOutput, RowwiseParallel, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, MLPModule, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.utils.checkpoint import checkpoint fw_graph_cell = [None] bw_graph_cell = [None] fw_compiler = functools.partial(extract_graph, graph_cell=fw_graph_cell) bw_compiler = functools.partial(extract_graph, graph_cell=bw_graph_cell) from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd aot_eager_graph = aot_autograd( fw_compiler=fw_compiler, bw_compiler=bw_compiler, partition_fn=min_cut_rematerialization_partition, ) class TestDTensorCompile(torch._dynamo.test_case.TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
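The comm-reordering test above builds a sequence-parallel plan over a fake attention block, compiles it with Inductor, and checks that the all-gather emitted for the column-wise projections is scheduled ahead of the matmuls. A stripped-down eager sketch of the same plan is given below; it assumes the script is launched with 2 ranks (for example via torchrun) so that init_device_mesh can build a 1-d "tp" mesh, uses cpu in place of the cuda mesh from the test, and the TinyAttention/TinyBlock module names are illustrative, not part of the test.

# Sketch of the sequence-parallel plan from the test above, eager mode only.
# Assumes an already-initialized 2-rank process group (e.g. torchrun --nproc-per-node=2).
import torch
import torch.nn as nn
from torch.distributed._tensor import init_device_mesh, Replicate, Shard
from torch.distributed.tensor.parallel import (
    ColwiseParallel,
    parallelize_module,
    PrepareModuleInput,
    RowwiseParallel,
)


class TinyAttention(nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.wq = nn.Linear(16, 16)
        self.wo = nn.Linear(16, 16)

    def forward(self, x):
        return self.wo(self.wq(x))


class TinyBlock(nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.attn = TinyAttention()

    def forward(self, x):
        return self.attn(x)


tp_mesh = init_device_mesh("cpu", (2,), mesh_dim_names=("tp",))
block = TinyBlock()
parallelize_module(
    module=block,
    device_mesh=tp_mesh,
    parallelize_plan={
        # gather the sequence-sharded input before the attention block
        "attn": PrepareModuleInput(
            input_layouts=Shard(0), desired_input_layouts=Replicate()
        ),
        "attn.wq": ColwiseParallel(),
        # row-wise output projection, re-sharding the output on the sequence dim
        "attn.wo": RowwiseParallel(output_layouts=Shard(0)),
    },
)
out = block(torch.rand(20, 16))  # each rank gets its sequence-sharded slice back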
torch
test/distributed/_tensor/test_dtensor_compile.py
forward
def forward(self, input): return self.mlp_1(self.mlp_0(input))
import copy import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.testing import torch.distributed as dist import torch.nn as nn from torch._C import FileCheck from torch._inductor.utils import run_and_get_triton_code from torch.distributed._tensor import ( DeviceMesh, DTensor, init_device_mesh, Partial, Replicate, Shard, ) from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper, CheckpointImpl, ) from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, PrepareModuleInput, PrepareModuleOutput, RowwiseParallel, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, MLPModule, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.utils.checkpoint import checkpoint class SimpleModel(nn.Module): from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_dtensor_compile.py
forward
def forward(self, input): return self.mlp_1(self.mlp_0(input))
import copy import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.testing import torch.distributed as dist import torch.nn as nn from torch._C import FileCheck from torch._inductor.utils import run_and_get_triton_code from torch.distributed._tensor import ( DeviceMesh, DTensor, init_device_mesh, Partial, Replicate, Shard, ) from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper, CheckpointImpl, ) from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, PrepareModuleInput, PrepareModuleOutput, RowwiseParallel, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, MLPModule, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.utils.checkpoint import checkpoint class SimpleModel(nn.Module): from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_op_strategy.py
test_bmm_2d_mesh
def test_bmm_2d_mesh(self):
    mesh = DeviceMesh(self.device_type, torch.arange(self.world_size).reshape(2, 2))
    all_strats = gen_einsum_strategies("bmk,bkn->bmn", mesh)
    self.assertEqual(len(all_strats.strategies), 25)
from itertools import chain import torch from torch.distributed._tensor import DeviceMesh, DTensor from torch.distributed._tensor.placement_types import ( DTensorSpec, Partial, Replicate, Shard, TensorMeta, ) from torch.distributed.tensor._collective_utils import redistribute_cost from torch.distributed.tensor._op_schema import OpSchema, OpStrategy, PlacementStrategy from torch.distributed.tensor._ops._einsum_strategy import ( EinsumDims, gen_einsum_strategies, ) from torch.testing._internal.common_utils import run_tests, TestCase from torch.testing._internal.distributed._tensor.common_dtensor import DTensorOpTestBase class TestEinsumStrategies(DTensorOpTestBase): from torch.distributed.tensor._ops._matrix_ops import addmm_strategy from torch.distributed.tensor._ops._matrix_ops import mm_strategy from torch.distributed.tensor._ops._matrix_ops import bmm_strategy
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
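gen_einsum_strategies enumerates, for each mesh dimension, the ways the operands and output of an einsum can be placed (sharded along one of the einsum dims, replicated, or, with linearity enabled, partial), which is why the 2-d mesh above yields 5 x 5 = 25 strategies. The sketch below inspects the strategies generated for a plain matmul equation on a 1-d mesh; it assumes the same 4-rank environment the test base class provides and uses the private einsum-strategy module exactly as the record's own imports do.

# Sketch: enumerate DTensor sharding strategies for "mk,kn->mn" on a 1-d mesh.
# Assumes an already-initialized 4-rank process group, matching the test base class.
import torch
from torch.distributed._tensor import DeviceMesh
from torch.distributed.tensor._ops._einsum_strategy import gen_einsum_strategies

mesh = DeviceMesh("cpu", torch.arange(4))
strategies = gen_einsum_strategies("mk,kn->mn", mesh)
for strategy in strategies.strategies:
    # each PlacementStrategy records the output spec plus the specs
    # expected for the two einsum inputs
    print(strategy)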
torch
test/distributed/_tensor/test_op_strategy.py
test_pointwise_1d_mesh
def test_pointwise_1d_mesh(self):
    mesh = self.build_device_mesh()
    simple_strats = gen_einsum_strategies("abcd,abcd->abcd", mesh)
    self.assertEqual(len(simple_strats.strategies), 5)
    broadcast_strats = gen_einsum_strategies("bcd,abcd->abcd", mesh)
    self.assertEqual(len(broadcast_strats.strategies), 5)
from itertools import chain import torch from torch.distributed._tensor import DeviceMesh, DTensor from torch.distributed._tensor.placement_types import ( DTensorSpec, Partial, Replicate, Shard, TensorMeta, ) from torch.distributed.tensor._collective_utils import redistribute_cost from torch.distributed.tensor._op_schema import OpSchema, OpStrategy, PlacementStrategy from torch.distributed.tensor._ops._einsum_strategy import ( EinsumDims, gen_einsum_strategies, ) from torch.testing._internal.common_utils import run_tests, TestCase from torch.testing._internal.distributed._tensor.common_dtensor import DTensorOpTestBase class TestEinsumStrategies(DTensorOpTestBase): from torch.distributed.tensor._ops._matrix_ops import addmm_strategy from torch.distributed.tensor._ops._matrix_ops import mm_strategy from torch.distributed.tensor._ops._matrix_ops import bmm_strategy
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_op_strategy.py
test_linearity_1d_mesh
def test_linearity_1d_mesh(self):
    mesh = self.build_device_mesh()
    all_strats = gen_einsum_strategies("abcd,abcd->abcd", mesh, linearity=True)
    self.assertEqual(len(all_strats.strategies), 6)
from itertools import chain import torch from torch.distributed._tensor import DeviceMesh, DTensor from torch.distributed._tensor.placement_types import ( DTensorSpec, Partial, Replicate, Shard, TensorMeta, ) from torch.distributed.tensor._collective_utils import redistribute_cost from torch.distributed.tensor._op_schema import OpSchema, OpStrategy, PlacementStrategy from torch.distributed.tensor._ops._einsum_strategy import ( EinsumDims, gen_einsum_strategies, ) from torch.testing._internal.common_utils import run_tests, TestCase from torch.testing._internal.distributed._tensor.common_dtensor import DTensorOpTestBase class TestEinsumStrategies(DTensorOpTestBase): from torch.distributed.tensor._ops._matrix_ops import addmm_strategy from torch.distributed.tensor._ops._matrix_ops import mm_strategy from torch.distributed.tensor._ops._matrix_ops import bmm_strategy
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_op_strategy.py
test_redistribute_cost_mesh_1d
def test_redistribute_cost_mesh_1d(self): mesh_1d = self.build_device_mesh() shard_placement = (Shard(0),) replica_placement = (Replicate(),) partial_placement = (Partial(),) global_tensor = torch.randn(10, 10) global_tensor_meta = self._extract_tensor_meta(global_tensor) # shard spec shard_spec = DTensorSpec(mesh_1d, shard_placement, global_tensor_meta) # replica spec replica_spec = DTensorSpec(mesh_1d, replica_placement, global_tensor_meta) # partial spec partial_spec = DTensorSpec(mesh_1d, partial_placement, global_tensor_meta) # make sure reshard cost is 0 for the same spec redistribute for spec in [shard_spec, replica_spec, partial_spec]: cost = redistribute_cost(spec, spec) self.assertEqual(cost, 0) # shard -> replicate allgather_cost = redistribute_cost(shard_spec, replica_spec) # partial -> shard reduce_scatter_cost = redistribute_cost(partial_spec, shard_spec) # partial -> replicate allreduce_cost = redistribute_cost(partial_spec, replica_spec) self.assertEqual(allgather_cost, reduce_scatter_cost) self.assertTrue(allreduce_cost + 1 < allgather_cost + reduce_scatter_cost) # shard to partial cost = redistribute_cost(shard_spec, partial_spec) self.assertEqual(cost, float("inf"))
from itertools import chain import torch from torch.distributed._tensor import DeviceMesh, DTensor from torch.distributed._tensor.placement_types import ( DTensorSpec, Partial, Replicate, Shard, TensorMeta, ) from torch.distributed.tensor._collective_utils import redistribute_cost from torch.distributed.tensor._op_schema import OpSchema, OpStrategy, PlacementStrategy from torch.distributed.tensor._ops._einsum_strategy import ( EinsumDims, gen_einsum_strategies, ) from torch.testing._internal.common_utils import run_tests, TestCase from torch.testing._internal.distributed._tensor.common_dtensor import DTensorOpTestBase class TestCostModel(DTensorOpTestBase): from torch.distributed.tensor._ops._matrix_ops import addmm_strategy from torch.distributed.tensor._ops._matrix_ops import mm_strategy from torch.distributed.tensor._ops._matrix_ops import bmm_strategy
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
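redistribute_cost estimates the communication cost of moving a tensor from one DTensorSpec (mesh, placements, and tensor metadata) to another: zero for a no-op, comparable costs for all-gather and reduce-scatter, and inf for transitions the cost model refuses, such as shard to partial in the test above. The sketch below mirrors that test; the _extract_tensor_meta helper is not shown in this record, so the TensorMeta construction here is an assumption about what it does.

# Sketch: compare redistribute costs between shard/replicate/partial specs on a 1-d mesh.
# Assumes an already-initialized 4-rank process group; the TensorMeta construction
# stands in for the test's _extract_tensor_meta helper (an assumption, not shown here).
import torch
from torch.distributed._tensor import DeviceMesh
from torch.distributed._tensor.placement_types import (
    DTensorSpec,
    Partial,
    Replicate,
    Shard,
    TensorMeta,
)
from torch.distributed.tensor._collective_utils import redistribute_cost

mesh = DeviceMesh("cpu", torch.arange(4))
t = torch.randn(10, 10)
meta = TensorMeta(shape=t.shape, stride=t.stride(), dtype=t.dtype)

shard_spec = DTensorSpec(mesh, (Shard(0),), meta)
replica_spec = DTensorSpec(mesh, (Replicate(),), meta)
partial_spec = DTensorSpec(mesh, (Partial(),), meta)

print(redistribute_cost(shard_spec, replica_spec))    # all-gather
print(redistribute_cost(partial_spec, shard_spec))    # reduce-scatter
print(redistribute_cost(partial_spec, replica_spec))  # all-reduce
print(redistribute_cost(shard_spec, partial_spec))    # inf: not modeled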
torch
test/distributed/_tensor/test_op_strategy.py
test_redistribute_cost_latency
def test_redistribute_cost_latency(self): # test cost model on addmm op from torch.distributed.tensor._ops._matrix_ops import addmm_strategy mesh = self.build_device_mesh() shard0_placement = (Shard(0),) partial_placement = (Partial(),) shard1_placement = (Shard(1),) shard0_tensor_meta = self._extract_tensor_meta(torch.randn(8)) partial_tensor_meta = self._extract_tensor_meta(torch.randn(50, 6)) shard1_tensor_meta = self._extract_tensor_meta(torch.randn(6, 8)) # shard spec shard0_spec = DTensorSpec(mesh, shard0_placement, shard0_tensor_meta) # replica spec partial_spec = DTensorSpec(mesh, partial_placement, partial_tensor_meta) # partial spec shard1_spec = DTensorSpec(mesh, shard1_placement, shard1_tensor_meta) op_schema = OpSchema( torch.ops.aten.addmm.default, ( OpStrategy([PlacementStrategy(shard0_spec)]), OpStrategy([PlacementStrategy(partial_spec)]), OpStrategy([PlacementStrategy(shard1_spec)]), ), {}, ) output_strategy = addmm_strategy(mesh, op_schema) strategy_costs = {} for strategy in output_strategy.strategies: redistribute_cost = sum(chain.from_iterable(strategy.redistribute_cost)) strategy_costs[str(strategy)] = redistribute_cost # assert that cost model counts for collective latency (i.e. multiple comm is penalized) self.assertTrue( strategy_costs["(S(0), R, S(1)) -> S(1)"] < strategy_costs["(R, S(0), R) -> S(0)"] ) # assert a single allreduce is the best one self.assertEqual( strategy_costs["(S(0), R, S(1)) -> S(1)"], min(strategy_costs.values()) )
from itertools import chain import torch from torch.distributed._tensor import DeviceMesh, DTensor from torch.distributed._tensor.placement_types import ( DTensorSpec, Partial, Replicate, Shard, TensorMeta, ) from torch.distributed.tensor._collective_utils import redistribute_cost from torch.distributed.tensor._op_schema import OpSchema, OpStrategy, PlacementStrategy from torch.distributed.tensor._ops._einsum_strategy import ( EinsumDims, gen_einsum_strategies, ) from torch.testing._internal.common_utils import run_tests, TestCase from torch.testing._internal.distributed._tensor.common_dtensor import DTensorOpTestBase class TestCostModel(DTensorOpTestBase): from torch.distributed.tensor._ops._matrix_ops import addmm_strategy from torch.distributed.tensor._ops._matrix_ops import mm_strategy from torch.distributed.tensor._ops._matrix_ops import bmm_strategy
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_op_strategy.py
test_redistribute_cost_mesh_2d
def test_redistribute_cost_mesh_2d(self): mesh_2d = DeviceMesh( self.device_type, torch.arange(self.world_size).reshape(2, 2) ) shard_placement = (Shard(0), Shard(0)) replica_placement = (Replicate(), Replicate()) partial_placement = (Partial(), Partial()) global_tensor = torch.randn(8, 8) global_tensor_meta = self._extract_tensor_meta(global_tensor) # shard spec shard_spec = DTensorSpec(mesh_2d, shard_placement, global_tensor_meta) # replica spec replica_spec = DTensorSpec(mesh_2d, replica_placement, global_tensor_meta) # partial spec partial_spec = DTensorSpec(mesh_2d, partial_placement, global_tensor_meta) # make sure reshard cost is 0 for the same spec redistribute for spec in [shard_spec, replica_spec, partial_spec]: cost = redistribute_cost(spec, spec) self.assertEqual(cost, 0) # shard -> replicate allgather_cost = redistribute_cost(shard_spec, replica_spec) # partial -> replicate allreduce_cost = redistribute_cost(partial_spec, replica_spec) # partial -> shard reduce_scatter_cost = redistribute_cost(partial_spec, shard_spec) self.assertTrue(allreduce_cost > allgather_cost) self.assertTrue(allreduce_cost > reduce_scatter_cost)
from itertools import chain import torch from torch.distributed._tensor import DeviceMesh, DTensor from torch.distributed._tensor.placement_types import ( DTensorSpec, Partial, Replicate, Shard, TensorMeta, ) from torch.distributed.tensor._collective_utils import redistribute_cost from torch.distributed.tensor._op_schema import OpSchema, OpStrategy, PlacementStrategy from torch.distributed.tensor._ops._einsum_strategy import ( EinsumDims, gen_einsum_strategies, ) from torch.testing._internal.common_utils import run_tests, TestCase from torch.testing._internal.distributed._tensor.common_dtensor import DTensorOpTestBase class TestCostModel(DTensorOpTestBase): from torch.distributed.tensor._ops._matrix_ops import addmm_strategy from torch.distributed.tensor._ops._matrix_ops import mm_strategy from torch.distributed.tensor._ops._matrix_ops import bmm_strategy
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_op_strategy.py
test_mm_strategies
def test_mm_strategies(self): from torch.distributed.tensor._ops._matrix_ops import mm_strategy mesh = self.build_device_mesh() lhs_tensor = torch.randn(6, 8) rhs_tensor = torch.randn(8, 12) lhs_tensor_meta = self._extract_tensor_meta(lhs_tensor) rhs_tensor_meta = self._extract_tensor_meta(rhs_tensor) mm_combs = ( (Shard(0), Replicate()), (Replicate(), Shard(1)), (Shard(1), Shard(0)), (Replicate(), Replicate()), ) for lhs, rhs in mm_combs: lhs_spec = DTensorSpec(mesh, (lhs,), lhs_tensor_meta) rhs_spec = DTensorSpec(mesh, (rhs,), rhs_tensor_meta) op_schema = OpSchema( torch.ops.aten.mm.default, ( OpStrategy([PlacementStrategy(lhs_spec)]), OpStrategy([PlacementStrategy(rhs_spec)]), ), {}, ) # test the strategy res_strategies = mm_strategy(mesh, op_schema) for strtgy in res_strategies.strategies: if strtgy.input_specs == (lhs_spec, rhs_spec): self.assertEqual(strtgy.redistribute_cost, [[0.0], [0.0]]) break op_schema = OpSchema( torch.ops.aten.mm.default, (lhs_spec, rhs_spec), {}, ) # test sharding prop output_sharding = DTensor._op_dispatcher.sharding_propagator.propagate_op_sharding_non_cached( op_schema ) self.assertFalse(output_sharding.needs_redistribute)
from itertools import chain import torch from torch.distributed._tensor import DeviceMesh, DTensor from torch.distributed._tensor.placement_types import ( DTensorSpec, Partial, Replicate, Shard, TensorMeta, ) from torch.distributed.tensor._collective_utils import redistribute_cost from torch.distributed.tensor._op_schema import OpSchema, OpStrategy, PlacementStrategy from torch.distributed.tensor._ops._einsum_strategy import ( EinsumDims, gen_einsum_strategies, ) from torch.testing._internal.common_utils import run_tests, TestCase from torch.testing._internal.distributed._tensor.common_dtensor import DTensorOpTestBase class TestCostModel(DTensorOpTestBase): from torch.distributed.tensor._ops._matrix_ops import addmm_strategy from torch.distributed.tensor._ops._matrix_ops import mm_strategy from torch.distributed.tensor._ops._matrix_ops import bmm_strategy
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_op_strategy.py
test_bmm_strategies
def test_bmm_strategies(self): from torch.distributed.tensor._ops._matrix_ops import bmm_strategy mesh = self.build_device_mesh() lhs_tensor = torch.randn(8, 6, 8) rhs_tensor = torch.randn(8, 8, 12) lhs_tensor_meta = self._extract_tensor_meta(lhs_tensor) rhs_tensor_meta = self._extract_tensor_meta(rhs_tensor) bmm_combs = ( (Shard(0), Shard(0)), (Shard(1), Replicate()), (Replicate(), Shard(2)), (Shard(2), Shard(1)), (Replicate(), Replicate()), ) for lhs, rhs in bmm_combs: lhs_spec = DTensorSpec(mesh, (lhs,), lhs_tensor_meta) rhs_spec = DTensorSpec(mesh, (rhs,), rhs_tensor_meta) op_schema = OpSchema( torch.ops.aten.bmm.default, ( OpStrategy([PlacementStrategy(lhs_spec)]), OpStrategy([PlacementStrategy(rhs_spec)]), ), {}, ) # test the strategy res_strategies = bmm_strategy(mesh, op_schema) for strtgy in res_strategies.strategies: if strtgy.input_specs == (lhs_spec, rhs_spec): self.assertEqual(strtgy.redistribute_cost, [[0.0], [0.0]]) break op_schema = OpSchema( torch.ops.aten.bmm.default, (lhs_spec, rhs_spec), {}, ) # test sharding prop output_sharding = DTensor._op_dispatcher.sharding_propagator.propagate_op_sharding_non_cached( op_schema ) self.assertFalse(output_sharding.needs_redistribute)
from itertools import chain import torch from torch.distributed._tensor import DeviceMesh, DTensor from torch.distributed._tensor.placement_types import ( DTensorSpec, Partial, Replicate, Shard, TensorMeta, ) from torch.distributed.tensor._collective_utils import redistribute_cost from torch.distributed.tensor._op_schema import OpSchema, OpStrategy, PlacementStrategy from torch.distributed.tensor._ops._einsum_strategy import ( EinsumDims, gen_einsum_strategies, ) from torch.testing._internal.common_utils import run_tests, TestCase from torch.testing._internal.distributed._tensor.common_dtensor import DTensorOpTestBase class TestCostModel(DTensorOpTestBase): from torch.distributed.tensor._ops._matrix_ops import addmm_strategy from torch.distributed.tensor._ops._matrix_ops import mm_strategy from torch.distributed.tensor._ops._matrix_ops import bmm_strategy
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_optimizers.py
shard_fn
def shard_fn(name, module, device_mesh):
    if isinstance(module, nn.Linear):
        for name, param in module.named_parameters():
            dist_param = torch.nn.Parameter(
                distribute_tensor(param, device_mesh, [Shard(0)])
            )
            # make sure partial sum get cleared after backward()
            dist_param.register_hook(
                lambda grad: grad.redistribute(placements=[Shard(0)])
            )
            module.register_parameter(name, dist_param)


# prepare input
from copy import deepcopy import torch import torch.nn as nn from torch.distributed._tensor import ( DeviceMesh, distribute_module, distribute_tensor, DTensor, Replicate, Shard, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, MLPModule, with_comms, ) from torch.optim.optimizer import _foreach_supported_types
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_optimizers.py
input_fn
def input_fn(mod, inputs, device_mesh):
    # split the input tensor to be sharded input
    dist_inp = distribute_tensor(inputs[0], device_mesh, [Shard(0)])
    return dist_inp


# prepare output to be local torch.Tensor
from copy import deepcopy import torch import torch.nn as nn from torch.distributed._tensor import ( DeviceMesh, distribute_module, distribute_tensor, DTensor, Replicate, Shard, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, MLPModule, with_comms, ) from torch.optim.optimizer import _foreach_supported_types
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
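These two callbacks plug into distribute_module: the partition function shards every nn.Linear parameter on dim 0 and registers a hook so gradients get redistributed back to Shard(0), while the input function turns the first positional input into a row-sharded DTensor. A brief sketch of wiring them together is below; it reuses shard_fn and input_fn exactly as defined in the two records above, and it assumes a 1-d mesh built over an already-initialized process group (the nn.Sequential sizes are illustrative).

# Sketch: apply the partition/input functions above with distribute_module.
# Assumes an already-initialized process group; shard_fn and input_fn come from
# the records above.
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.distributed._tensor import DeviceMesh, distribute_module

mesh = DeviceMesh("cpu", torch.arange(dist.get_world_size()))
mlp = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 8))

# shard_fn shards every nn.Linear parameter on dim 0; input_fn shards the input batch
dist_mlp = distribute_module(mlp, mesh, partition_fn=shard_fn, input_fn=input_fn)

out = dist_mlp(torch.randn(8, 8))  # DTensor output; .to_local() gives this rank's shard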
torch
test/distributed/_tensor/test_dtensor_compile.py
forward
def forward(self, input): return self.mlp_1(self.mlp_0(input))
import copy import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.testing import torch.distributed as dist import torch.nn as nn from torch._C import FileCheck from torch._inductor.utils import run_and_get_triton_code from torch.distributed._tensor import ( DeviceMesh, DTensor, init_device_mesh, Partial, Replicate, Shard, ) from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper, CheckpointImpl, ) from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, PrepareModuleInput, PrepareModuleOutput, RowwiseParallel, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, MLPModule, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.utils.checkpoint import checkpoint class SimpleModel(nn.Module): from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_dtensor_compile.py
world_size
def world_size(self) -> int: return 2
import copy import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.testing import torch.distributed as dist import torch.nn as nn from torch._C import FileCheck from torch._inductor.utils import run_and_get_triton_code from torch.distributed._tensor import ( DeviceMesh, DTensor, init_device_mesh, Partial, Replicate, Shard, ) from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper, CheckpointImpl, ) from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, PrepareModuleInput, PrepareModuleOutput, RowwiseParallel, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, MLPModule, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.utils.checkpoint import checkpoint fw_graph_cell = [None] bw_graph_cell = [None] fw_compiler = functools.partial(extract_graph, graph_cell=fw_graph_cell) bw_compiler = functools.partial(extract_graph, graph_cell=bw_graph_cell) from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd aot_eager_graph = aot_autograd( fw_compiler=fw_compiler, bw_compiler=bw_compiler, partition_fn=min_cut_rematerialization_partition, ) class TestDTensorCompile(torch._dynamo.test_case.TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_dtensor_compile.py
test_compile_dtensor_redistribute_backward
def test_compile_dtensor_redistribute_backward(self): mesh = DeviceMesh(device_type="cuda", mesh=torch.arange(self.world_size)) def fn(x, y): dt = DTensor.from_local(x.reshape(2, 4), mesh, [Shard(0)], run_check=False) dt2 = DTensor.from_local(y.reshape(4, 2), mesh, [Shard(1)], run_check=False) dt_out = torch.matmul(dt, dt2) dt_out_redistribute = dt_out.redistribute(mesh, [Replicate()]) return dt_out_redistribute.to_local() opt_fn = torch.compile(fn, backend=aot_eager_graph, fullgraph=True) x_ref = torch.arange(8, requires_grad=True, dtype=torch.float32) y_ref = torch.arange(8, requires_grad=True, dtype=torch.float32) ref = fn(x_ref, y_ref) x = torch.arange(8, requires_grad=True, dtype=torch.float32) y = torch.arange(8, requires_grad=True, dtype=torch.float32) res = opt_fn(x, y) self.assertEqual(res, ref) # Now run and assert the backward + gradients ref.sum().backward() res.sum().backward() self.assertEqual(x_ref.grad, x.grad) self.assertEqual(y_ref.grad, y.grad)
import copy import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.testing import torch.distributed as dist import torch.nn as nn from torch._C import FileCheck from torch._inductor.utils import run_and_get_triton_code from torch.distributed._tensor import ( DeviceMesh, DTensor, init_device_mesh, Partial, Replicate, Shard, ) from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper, CheckpointImpl, ) from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, PrepareModuleInput, PrepareModuleOutput, RowwiseParallel, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, MLPModule, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.utils.checkpoint import checkpoint fw_graph_cell = [None] bw_graph_cell = [None] fw_compiler = functools.partial(extract_graph, graph_cell=fw_graph_cell) bw_compiler = functools.partial(extract_graph, graph_cell=bw_graph_cell) from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd aot_eager_graph = aot_autograd( fw_compiler=fw_compiler, bw_compiler=bw_compiler, partition_fn=min_cut_rematerialization_partition, ) @instantiate_parametrized_tests class TestDTensorCompileE2E(DTensorTestBase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
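The compile test above round-trips through DTensor.from_local, a sharded matmul, and a redistribute back to Replicate before calling to_local, then checks gradients against eager. The same flow in plain eager mode looks like the sketch below; it assumes a 1-d mesh over an already-initialized 2-rank process group, with cpu standing in for the cuda mesh used by the test.

# Eager sketch of the from_local -> matmul -> redistribute -> to_local flow compiled above.
# Assumes an already-initialized 2-rank process group (cpu stands in for cuda here).
import torch
from torch.distributed._tensor import DeviceMesh, DTensor, Replicate, Shard

mesh = DeviceMesh("cpu", torch.arange(2))

x = torch.arange(8, dtype=torch.float32)
y = torch.arange(8, dtype=torch.float32)

# each rank contributes a 2x4 row shard and a 4x2 column shard
dt = DTensor.from_local(x.reshape(2, 4), mesh, [Shard(0)], run_check=False)
dt2 = DTensor.from_local(y.reshape(4, 2), mesh, [Shard(1)], run_check=False)

dt_out = torch.matmul(dt, dt2)                         # sharded matmul, global shape 4x4
replicated = dt_out.redistribute(mesh, [Replicate()])  # gather the full result
print(replicated.to_local().shape)                     # torch.Size([4, 4])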
torch
test/distributed/_tensor/test_dtensor_compile.py
fn
def fn(x):
    a = 0
    if x.is_replicate():
        a += 1
    if x.is_shard():
        a += 2
        if x.dim < 0:
            raise RuntimeError("dim < 0")
    if x.is_shard(0):
        a += 2
    if x.is_shard(dim=0):
        a += 2
    if x.is_shard(dim=None):
        a += 2
    if x.is_partial():
        a += 3
    return a

compiled_fn = torch.compile(backend="aot_eager", fullgraph=True)(fn)
for x in [Shard(0), Replicate(), Partial()]:
    opt_fn = fn(x)
    compiled_out = compiled_fn(x)
    self.assertEqual(opt_fn, compiled_out)
import copy import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.testing import torch.distributed as dist import torch.nn as nn from torch._C import FileCheck from torch._inductor.utils import run_and_get_triton_code from torch.distributed._tensor import ( DeviceMesh, DTensor, init_device_mesh, Partial, Replicate, Shard, ) from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper, CheckpointImpl, ) from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, PrepareModuleInput, PrepareModuleOutput, RowwiseParallel, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, MLPModule, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.utils.checkpoint import checkpoint fw_graph_cell = [None] bw_graph_cell = [None] fw_compiler = functools.partial(extract_graph, graph_cell=fw_graph_cell) bw_compiler = functools.partial(extract_graph, graph_cell=bw_graph_cell) from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd aot_eager_graph = aot_autograd( fw_compiler=fw_compiler, bw_compiler=bw_compiler, partition_fn=min_cut_rematerialization_partition, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_dtensor_ops.py
skipOps
def skipOps(test_case_name, base_test_name, to_skip): all_opinfos = op_db for xfail in to_skip: op_name, variant_name, device_type, dtypes, expected_failure = xfail matching_opinfos = [ o for o in all_opinfos if o.name == op_name and o.variant_test_name == variant_name ] assert len(matching_opinfos) >= 1, f"Couldn't find OpInfo for {xfail}" for opinfo in matching_opinfos: decorators = list(opinfo.decorators) if expected_failure: decorator = DecorateInfo( unittest.expectedFailure, test_case_name, base_test_name, device_type=device_type, dtypes=dtypes, ) decorators.append(decorator) else: decorator = DecorateInfo( unittest.skip("Skipped!"), test_case_name, base_test_name, device_type=device_type, dtypes=dtypes, ) decorators.append(decorator) opinfo.decorators = tuple(decorators) # This decorator doesn't modify fn in any way def wrapped(fn): return fn return wrapped # Re-generate this failed list, turn on dry_run of the below func # check_dtensor_func(self, test, op, dry_run=True), then run sth # like python test/distributed/_tensor/test_dtensor_ops.py > failed.expect dtensor_fails = { # these sometimes pass and sometimes fail # we need to remove many of them from list once op # get full support with varying sharding specs xfail("__getitem__"), xfail("__rsub__"), xfail("_native_batch_norm_legit"), xfail("_softmax_backward_data"), xfail("addbmm"), xfail("addmv"), xfail("addr"), xfail("all"), xfail("allclose"), xfail("amax"), xfail("amin"), xfail("aminmax"), xfail("any"), xfail("arange"), xfail("argmax"), xfail("argmin"), xfail("argsort"), xfail("as_strided"), xfail("as_strided", "partial_views"), xfail("as_strided_scatter"), xfail("baddbmm"), xfail("bernoulli"), xfail("block_diag"), xfail("broadcast_shapes"), xfail("cauchy"), xfail("cartesian_prod"), xfail("cdist"), xfail("cholesky"), xfail("cholesky_inverse"), xfail("cholesky_solve"), xfail("chunk"), xfail("clamp"), xfail("clamp_max"), xfail("clamp_min"), xfail("combinations"), xfail("complex"), xfail("constant_pad_nd"), xfail("corrcoef"), xfail("count_nonzero"), xfail("cov"), xfail("cross"), xfail("cummax"), xfail("cummin"), xfail("cumsum"), xfail("cumulative_trapezoid"), xfail("diag"), xfail("diag_embed"), xfail("diagflat"), xfail("diagonal"), xfail("diagonal_copy"), xfail("diagonal_scatter"), xfail("dist"), xfail("dot"), xfail("einsum"), xfail("empty"), xfail("empty_like"), xfail("exponential"), xfail("eye"), xfail("fft.fft2"), xfail("fft.fft"), xfail("fft.fftn"), xfail("fft.fftshift"), xfail("fft.ifft2"), xfail("fft.ifft"), xfail("fft.ifftshift"), xfail("fft.ihfft2"), xfail("fft.ihfft"), xfail("fft.ihfftn"), xfail("fft.irfft2"), xfail("fft.irfftn"), xfail("fft.rfft2"), xfail("fft.rfft"), xfail("fft.rfftn"), xfail("fill"), xfail("flip"), xfail("fliplr"), xfail("flipud"), xfail("floor_divide"), xfail("fmax"), xfail("fmin"), xfail("frexp"), xfail("full"), xfail("full_like"), xfail("gather"), xfail("geometric"), xfail("geqrf"), xfail("grid_sampler_2d"), xfail("gradient"), xfail("heaviside"), xfail("histc"), xfail("histogram"), xfail("histogramdd"), xfail("index_add"), xfail("index_copy"), xfail("index_fill"), xfail("index_put"), xfail("index_reduce"), xfail("index_select"), xfail("isin"), xfail("isinf"), xfail("isneginf"), xfail("isposinf"), xfail("kthvalue"), xfail("linalg.cholesky"), xfail("linalg.cholesky_ex"), xfail("linalg.cond"), xfail("linalg.cross"), xfail("linalg.det"), xfail("linalg.det", "singular"), xfail("linalg.eig"), xfail("linalg.eigh"), xfail("linalg.eigvals"), xfail("linalg.eigvalsh"), xfail("linalg.householder_product"), 
xfail("linalg.inv"), xfail("linalg.inv_ex"), xfail("linalg.ldl_factor"), xfail("linalg.ldl_factor_ex"), xfail("linalg.ldl_solve"), xfail("linalg.lstsq"), xfail("linalg.lstsq", "grad_oriented"), xfail("linalg.lu"), xfail("linalg.lu_factor"), xfail("linalg.lu_factor_ex"), xfail("linalg.lu_solve"), xfail("linalg.matrix_norm"), xfail("linalg.matrix_power"), xfail("linalg.matrix_rank"), xfail("linalg.matrix_rank", "hermitian"), xfail("linalg.multi_dot"), xfail("linalg.norm"), xfail("linalg.norm", "subgradients_at_zero"), xfail("linalg.pinv"), xfail("linalg.pinv", "hermitian"), xfail("linalg.qr"), xfail("linalg.slogdet"), xfail("linalg.solve"), xfail("linalg.solve_ex"), xfail("linalg.solve_triangular"), xfail("linalg.svd"), xfail("linalg.svdvals"), xfail("linalg.tensorinv"), xfail("linalg.tensorsolve"), xfail("linalg.vander"), xfail("linalg.vecdot"), xfail("linalg.vector_norm"), xfail("linspace"), xfail("log_normal"), xfail("log_softmax"), xfail("log_softmax", "with_dtype"), xfail("logcumsumexp"), xfail("logdet"), xfail("logspace"), xfail("logsumexp"), xfail("lt"), xfail("lu"), xfail("lu_solve"), xfail("lu_unpack"), xfail("masked_fill"), xfail("masked_scatter"), xfail("masked_select"), xfail("masked.amax"), xfail("masked.amin"), xfail("masked.argmax"), xfail("masked.argmin"), xfail("masked.cumprod"), xfail("masked.cumsum"), xfail("masked.log_softmax"), xfail("masked.logaddexp"), xfail("masked.logsumexp"), xfail("masked.median"), xfail("masked.norm"), xfail("masked.prod"), xfail("masked.softmin"), xfail("masked.softmax"), xfail("masked.sum"), xfail("matrix_exp"), xfail("max", "binary"), xfail("max", "reduction_no_dim"), xfail("max", "reduction_with_dim"), xfail("maximum"), xfail("median"), xfail("min", "binary"), xfail("min", "reduction_no_dim"), xfail("min", "reduction_with_dim"), xfail("minimum"), xfail("mode"), xfail("msort"), xfail("multinomial"), xfail("mv"), xfail("max_pool2d_with_indices_backward", ""), xfail("nanmean"), xfail("nanmedian"), xfail("nanquantile"), xfail("nansum"), xfail("native_batch_norm"), xfail("native_dropout_backward"), xfail("native_layer_norm"), xfail("narrow_copy"), xfail("ne"), xfail("new_empty"), xfail("new_empty_strided"), xfail("transpose"), xfail("nn.functional.adaptive_avg_pool1d"), xfail("nn.functional.adaptive_avg_pool2d"), xfail("nn.functional.adaptive_avg_pool3d"), xfail("nn.functional.adaptive_max_pool1d"), xfail("nn.functional.adaptive_max_pool2d"), xfail("nn.functional.adaptive_max_pool3d"), xfail("nn.functional.alpha_dropout"), xfail("nn.functional.avg_pool1d"), xfail("nn.functional.avg_pool2d"), xfail("nn.functional.avg_pool3d"), xfail("nn.functional.batch_norm"), xfail("nn.functional.batch_norm", "without_cudnn"), xfail("nn.functional.bilinear"), xfail("nn.functional.binary_cross_entropy"), xfail("nn.functional.binary_cross_entropy_with_logits"), xfail("nn.functional.celu"), xfail("nn.functional.conv1d"), xfail("nn.functional.conv2d"), xfail("nn.functional.conv_transpose1d"), xfail("nn.functional.conv_transpose2d"), xfail("nn.functional.conv_transpose3d"), xfail("nn.functional.cosine_similarity"), xfail("nn.functional.cross_entropy"), xfail("nn.functional.ctc_loss"), xfail("nn.functional.dropout"), xfail("nn.functional.dropout2d"), xfail("nn.functional.dropout3d"), xfail("nn.functional.elu"), xfail("nn.functional.fractional_max_pool2d"), xfail("nn.functional.fractional_max_pool3d"), xfail("nn.functional.gaussian_nll_loss"), xfail("nn.functional.glu"), xfail("nn.functional.grid_sample"), xfail("nn.functional.group_norm"), 
xfail("nn.functional.hardshrink"), xfail("nn.functional.hardsigmoid"), xfail("nn.functional.hardswish"), xfail("nn.functional.hardtanh"), xfail("nn.functional.huber_loss"), xfail("nn.functional.instance_norm"), xfail("nn.functional.interpolate", "area"), xfail("nn.functional.interpolate", "bicubic"), xfail("nn.functional.interpolate", "bilinear"), xfail("nn.functional.interpolate", "linear"), xfail("nn.functional.interpolate", "nearest"), xfail("nn.functional.interpolate", "trilinear"), xfail("nn.functional.layer_norm"), xfail("nn.functional.leaky_relu"), xfail("nn.functional.linear"), xfail("nn.functional.local_response_norm"), xfail("nn.functional.logsigmoid"), xfail("nn.functional.margin_ranking_loss"), xfail("nn.functional.max_pool1d"), xfail("nn.functional.max_pool2d"), xfail("nn.functional.max_pool3d"), xfail("nn.functional.max_unpool1d"), xfail("nn.functional.max_unpool1d", "grad"), xfail("nn.functional.max_unpool2d"), xfail("nn.functional.max_unpool2d", "grad"), xfail("nn.functional.max_unpool3d"), xfail("nn.functional.max_unpool3d", "grad"), xfail("nn.functional.mish"), xfail("nn.functional.mse_loss"), xfail("nn.functional.multi_margin_loss"), xfail("nn.functional.multilabel_margin_loss"), xfail("nn.functional.multilabel_soft_margin_loss"), xfail("nn.functional.nll_loss"), xfail("nn.functional.normalize"), xfail("nn.functional.pad", "circular"), xfail("nn.functional.pad", "constant"), xfail("nn.functional.pad", "reflect"), xfail("nn.functional.pad", "replicate"), xfail("nn.functional.pairwise_distance"), xfail("nn.functional.pdist"), xfail("nn.functional.pixel_shuffle"), xfail("nn.functional.pixel_unshuffle"), xfail("nn.functional.poisson_nll_loss"), xfail("nn.functional.prelu"), xfail("nn.functional.relu6"), xfail("nn.functional.rrelu"), xfail("nn.functional.selu"), xfail("nn.functional.silu"), xfail("nn.functional.smooth_l1_loss"), xfail("nn.functional.soft_margin_loss"), xfail("nn.functional.softplus"), xfail("nn.functional.softshrink"), xfail("nn.functional.threshold"), xfail("nn.functional.triplet_margin_loss"), xfail("nn.functional.triplet_margin_with_distance_loss"), xfail("nn.functional.unfold"), xfail("nn.functional.upsample_bilinear"), xfail("nn.functional.upsample_nearest"), xfail("nonzero"), xfail("norm"), xfail("norm", "fro"), xfail("norm", "inf"), xfail("norm", "nuc"), xfail("normal"), xfail("normal", "number_mean"), xfail("ormqr"), xfail("ones"), xfail("pca_lowrank"), xfail("pinverse"), xfail("polar"), xfail("put"), xfail("qr"), xfail("quantile"), xfail("rand_like"), xfail("randint_like"), xfail("randint"), xfail("randn"), xfail("randn_like"), xfail("renorm"), xfail("repeat_interleave"), xfail("resize_"), xfail("resize_as_"), xfail("roll"), xfail("rot90"), xfail("rsub"), xfail("scalar_tensor"), xfail("scatter_add"), xfail("scatter"), xfail("scatter_reduce", "amax"), xfail("scatter_reduce", "amin"), xfail("scatter_reduce", "mean"), xfail("scatter_reduce", "prod"), xfail("scatter_reduce", "sum"), xfail("searchsorted"), xfail("select"), xfail("select_scatter"), xfail("sort"), xfail("sparse.sampled_addmm"), xfail("sparse.mm", "reduce"), xfail("special.airy_ai"), xfail("special.bessel_j0"), xfail("special.bessel_j1"), xfail("special.bessel_y0"), xfail("special.bessel_y1"), xfail("special.chebyshev_polynomial_t"), xfail("special.chebyshev_polynomial_u"), xfail("special.entr"), xfail("special.erfcx"), xfail("special.hermite_polynomial_h"), xfail("special.hermite_polynomial_he"), xfail("special.i0e"), xfail("special.i1"), xfail("special.i1e"), 
xfail("special.laguerre_polynomial_l"), xfail("special.log_ndtr"), xfail("special.modified_bessel_i0"), xfail("special.modified_bessel_i1"), xfail("special.modified_bessel_k0"), xfail("special.modified_bessel_k1"), xfail("special.ndtri"), xfail("special.scaled_modified_bessel_k0"), xfail("special.scaled_modified_bessel_k1"), xfail("special.spherical_bessel_j0"), xfail("special.xlog1py"), xfail("special.zeta"), xfail("squeeze", "multiple"), xfail("signal.windows.bartlett"), xfail("signal.windows.blackman"), xfail("signal.windows.cosine"), xfail("signal.windows.exponential"), xfail("signal.windows.gaussian"), xfail("signal.windows.general_cosine"), xfail("signal.windows.general_hamming"), xfail("signal.windows.hamming"), xfail("signal.windows.hann"), xfail("signal.windows.nuttall"), xfail("signal.windows.kaiser"), xfail("stack"), xfail("std"), xfail("std", "unbiased"), xfail("std_mean"), xfail("std_mean", "unbiased"), xfail("stft"), xfail("svd"), xfail("svd_lowrank"), xfail("t"), xfail("take_along_dim"), xfail("take"), xfail("tensor_split"), xfail("to_sparse"), xfail("topk"), xfail("trace"), xfail("trapezoid"), xfail("trapz"), xfail("triangular_solve"), xfail("tril"), xfail("triu"), xfail("unbind"), xfail("unfold"), xfail("unfold_copy"), xfail("uniform"), xfail("unflatten"), xfail("unique_consecutive"), xfail("unique"), xfail("unsafe_split"), xfail("var_mean"), xfail("var_mean", "unbiased"), xfail("vdot"), xfail("view_copy"), xfail("view_as_complex"), xfail("where"), xfail("zeros"), # ops inside this might even fail without dtensor # tests, as we rescale op db common test size factor (i.e. L, M, S) # which triggered the orignal function run failures with input # generation becomes wrong, we skip them for now but should enable later. # TODO: need to clean this list and remove all cases skip("argwhere"), skip("cumprod"), skip("__rmatmul__"), skip("meshgrid", "list_of_tensors"), skip("meshgrid", "variadic_tensors"), skip("nn.functional.scaled_dot_product_attention"), skip("nn.functional.softmin"), skip("nn.functional.embedding"), skip("nn.functional.embedding_bag"), skip("nn.functional.feature_alpha_dropout", "with_train"), skip("nn.functional.feature_alpha_dropout", "without_train"), skip("nn.functional.hinge_embedding_loss"), skip("nn.functional.cosine_embedding_loss"), skip("fft.hfft"), skip("fft.hfft2"), skip("fft.hfft2"), skip("fft.hfftn"), skip("fft.ifftn"), skip("fft.irfft"), skip("istft"), skip("isclose"), skip("isreal"), skip("matmul"), skip("masked.mean"), skip("masked.var"), skip("masked.std"), skip("masked.normalize"), skip("prod"), skip("_segment_reduce", "lengths"), skip("_segment_reduce", "offsets"), # TODO: fix the following ops skip("squeeze"), } # Add a list of ops that are currently failing BW pass skip_bw = [ None, # corresponds to the transpose ops 'H' and 'T' "torch.bucketize", "torch.conj_physical", "torch.eq", "torch.isfinite", "torch.isnan", ] OP_DB_WORLD_SIZE = 4 # DEVICE_TYPE = "cuda" if torch.cuda.is_available() and torch.cuda.device_count() >= OP_DB_WORLD_SIZE else "cpu" # TODO: debug cuda illegal memory access issue and re-enable cuda tests DEVICE_TYPE = "cpu" class TestDTensorOps(DTensorOpTestBase): @property def world_size(self) -> int: return OP_DB_WORLD_SIZE # only allow float dytpe for now, we can relax this constraint # when feel necessary later (i.e when adding quantization support). 
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN") @suppress_warnings @ops(op_db, allowed_dtypes=(torch.float,)) @skipOps("TestDTensorOps", "test_dtensor_op_db", dtensor_fails) def test_dtensor_op_db(self, dtype, op): self.mesh = DeviceMesh(DEVICE_TYPE, torch.arange(self.world_size)) # test each op with dist tensor inputs and normal inputs def test(): samples = op.sample_inputs(DEVICE_TYPE, dtype, requires_grad=True) for sample_input in samples: args = [sample_input.input] + list(sample_input.args) kwargs = sample_input.kwargs self.run_dtensor_crossref(op.op, args, kwargs) # we need to figure out a way to test the out variant, out variant testing # is tricky, as we need to pre allocate the dtensor out, some of them rely # on sharding placements to be pre-known (i.e. mm.out) # if isinstance(expected, torch.Tensor) and op.supports_out: # func(*args, **kwargs, out=expected) self.check_dtensor_func(test, op) def assert_ref_dtensor_equal(self, dtensor_rs, rs): flat_dtensor_rs, _ = tree_flatten(dtensor_rs) flat_rs, _ = tree_flatten(rs) self.assertEqual(len(flat_dtensor_rs), len(flat_rs)) for dtensor_r, r in zip(flat_dtensor_rs, flat_rs): if not isinstance(r, torch.Tensor): continue self.assertIsInstance(dtensor_r, torch.Tensor) self.assertEqualOnRank( dtensor_r.shape, r.shape, f"Shape mismatch! original shape:{r.shape}, dtensor shape: {dtensor_r.shape}", ) self.assertEqualOnRank( dtensor_r.requires_grad, r.requires_grad, "op result requires_grad mismatch!" f"original requires_grad: {r.requires_grad}, " f"dtensor requires_grad: {dtensor_r.requires_grad}", ) self.assertEqualOnRank(dtensor_r.to_local(), r) def run_dtensor_crossref(self, func, args, kwargs): to_dtensor = DTensorConverter(self.mesh, args, kwargs) def concat_res_if_necessary(func, res: object) -> object: # concat the result on corresponding dim for ops like # split, so that we can call backward on a single tensor if ( (resolve_name(func) is not None) and ("split" in resolve_name(func)) ): dim = args[2] if len(args) == 3 else 0 return torch.cat(res, dim=dim) else: return res # TODO: also handle cases where func raise an exception rs = func(*args, **kwargs) rs = concat_res_if_necessary(func, rs) def to_replicate(e: object) -> object: return ( e.redistribute(self.mesh, self.mesh.ndim * [Replicate()]) if isinstance(e, DTensor) else e ) try: # Suppress warnings, this doesn't matter for test_meta.py # but it does matter if you want to use this decorator # for cross-ref testing, as some tests may be looking at # errors with warnings.catch_warnings(): warnings.simplefilter("ignore") # for every comb of sharding choices, we test if it works for dtensor_args, dtensor_kwargs in to_dtensor: # Only attempt if we managed to convert all tensors to DTensor # (if any of them failed, we're in a mixed tensor situation and # this is not allowed in DTensor) if to_dtensor.successful(): # Handle special cases first if there's any # Suppress warnings, this doesn't matter for test_meta.py # but it does matter if you want to use this decorator # for cross-ref testing, as some tests may be looking at # errors dtensor_rs = func(*dtensor_args, **dtensor_kwargs) # we need to skip tests containing tensors of zero elmeents for now. # see issue: https://github.com/pytorch/tau/issues/470 # TODO remove this once issue above fixed. 
flat_args, _ = tree_flatten(dtensor_rs) if any( isinstance(e, torch.Tensor) and e.numel() == 0 for e in flat_args ): continue # redistribute/all_gather the results to compare with normal output dtensor_rs = tree_map(to_replicate, dtensor_rs) dtensor_rs = concat_res_if_necessary(func, dtensor_rs) try: if resolve_name(func) not in skip_bw: if isinstance(dtensor_rs, DTensor): dtensor_rs.to_local().sum().backward() elif isinstance(dtensor_rs, tuple): dtensor_rs[0].to_local().sum().backward() except Exception as e: # TODO(anj): Remove this guard exception after gaining more confidence. if torch.distributed.get_rank() == 0: print( f"failed to run BW: {resolve_name(func)}, {func}, {str(e)})" ) self.assert_ref_dtensor_equal(dtensor_rs, rs) else: raise RuntimeError( f"failed to convert args to DTensor; " f"originally (*{args}, **{kwargs})" ) except Exception as e: raise RuntimeError( f"failed to run: {resolve_name(func)}, with (*{args}, **{kwargs})" ) from e return rs def check_dtensor_func(self, test_func, opinfo, dry_run=False): try: test_func() except Exception: if not dry_run: raise if dist.get_rank() == 0: if opinfo.variant_test_name: print(f"xfail('{opinfo.name}', '{opinfo.variant_test_name}'),") else: print(f"xfail('{opinfo.name}'),") # only instantiate tests for DEVICE_TYPE alone (i.e. either CPU or GPU) instantiate_device_type_tests(TestDTensorOps, globals(), only_for=(DEVICE_TYPE,)) if __name__ == "__main__": run_tests()
def skipOps(test_case_name, base_test_name, to_skip): all_opinfos = op_db for xfail in to_skip: op_name, variant_name, device_type, dtypes, expected_failure = xfail matching_opinfos = [ o for o in all_opinfos if o.name == op_name and o.variant_test_name == variant_name ] assert len(matching_opinfos) >= 1, f"Couldn't find OpInfo for {xfail}" for opinfo in matching_opinfos: decorators = list(opinfo.decorators) if expected_failure: decorator = DecorateInfo( unittest.expectedFailure, test_case_name, base_test_name, device_type=device_type, dtypes=dtypes, ) decorators.append(decorator) else: decorator = DecorateInfo( unittest.skip("Skipped!"), test_case_name, base_test_name, device_type=device_type, dtypes=dtypes, ) decorators.append(decorator) opinfo.decorators = tuple(decorators) # This decorator doesn't modify fn in any way def wrapped(fn): return fn return wrapped # Re-generate this failed list, turn on dry_run of the below func # check_dtensor_func(self, test, op, dry_run=True), then run sth # like python test/distributed/_tensor/test_dtensor_ops.py > failed.expect dtensor_fails = { # these sometimes pass and sometimes fail # we need to remove many of them from list once op # get full support with varying sharding specs xfail("__getitem__"), xfail("__rsub__"), xfail("_chunk_cat"), xfail("_native_batch_norm_legit"), xfail("_upsample_bilinear2d_aa"), xfail("addbmm"), xfail("addmv"), xfail("addr"), xfail("all"), xfail("allclose"), xfail("alias_copy"), xfail("amax"), xfail("amin"), xfail("aminmax"), xfail("any"), xfail("arange"), xfail("argmax"), xfail("argmin"), xfail("argsort"), xfail("as_strided"), xfail("as_strided", "partial_views"), xfail("as_strided_copy"), xfail("as_strided_scatter"), xfail("bernoulli"), xfail("_batch_norm_with_update"), xfail("block_diag"), xfail("broadcast_shapes"), xfail("cauchy"), xfail("cdist"), xfail("cholesky"), xfail("cholesky_inverse"), xfail("cholesky_solve"), xfail("chunk"), xfail("clamp"), xfail("clamp_max"), xfail("clamp_min"), xfail("combinations"), xfail("complex"), xfail("constant_pad_nd"), xfail("count_nonzero"), xfail("cross"), xfail("cummax"), xfail("cummin"), xfail("cumsum"), xfail("cumulative_trapezoid"), xfail("diagonal_scatter"), xfail("dist"), xfail("dot"), xfail("empty"), xfail("empty_strided"), xfail("empty_like"), xfail("empty_permuted"), xfail("expand_copy"), xfail("exponential"), xfail("equal"), xfail("eye"), xfail("fft.fft2"), xfail("fft.fft"), xfail("fft.fftn"), xfail("fft.fftshift"), xfail("fft.ifft2"), xfail("fft.ifft"), xfail("fft.ifftshift"), xfail("fft.ihfft2"), xfail("fft.ihfft"), xfail("fft.ihfftn"), xfail("fft.irfft2"), xfail("fft.irfftn"), xfail("fft.rfft2"), xfail("fft.rfft"), xfail("fft.rfftn"), xfail("fill"), xfail("flip"), xfail("fliplr"), xfail("flipud"), xfail("floor_divide"), xfail("fmax"), xfail("fmin"), xfail("frexp"), xfail("full"), xfail("full_like"), xfail("gather"), xfail("geometric"), xfail("geqrf"), xfail("grid_sampler_2d"), xfail("gradient"), xfail("heaviside"), xfail("histc"), xfail("histogram"), xfail("histogramdd"), xfail("index_add"), xfail("index_copy"), xfail("index_fill"), xfail("index_put"), xfail("index_reduce", "prod"), xfail("index_reduce", "mean"), xfail("index_reduce", "amax"), xfail("index_reduce", "amin"), xfail("index_select"), xfail("isin"), xfail("kthvalue"), xfail("linalg.cholesky"), xfail("linalg.cholesky_ex"), xfail("linalg.cross"), xfail("linalg.det"), xfail("linalg.det", "singular"), xfail("linalg.eig"), xfail("linalg.eigvals"), xfail("linalg.householder_product"), xfail("linalg.inv"), 
xfail("linalg.inv_ex"), xfail("linalg.ldl_factor"), xfail("linalg.ldl_factor_ex"), xfail("linalg.ldl_solve"), xfail("linalg.lstsq"), xfail("linalg.lstsq", "grad_oriented"), xfail("linalg.lu"), xfail("linalg.lu_factor"), xfail("linalg.lu_factor_ex"), xfail("linalg.lu_solve"), xfail("linalg.matrix_norm"), xfail("linalg.matrix_power"), xfail("linalg.matrix_rank"), xfail("linalg.matrix_rank", "hermitian"), xfail("linalg.multi_dot"), xfail("linalg.norm"), xfail("linalg.norm", "subgradients_at_zero"), xfail("linalg.pinv"), xfail("linalg.pinv", "hermitian"), xfail("linalg.slogdet"), xfail("linalg.solve"), xfail("linalg.solve_ex"), xfail("linalg.solve_triangular"), xfail("linalg.tensorinv"), xfail("linalg.tensorsolve"), xfail("linalg.vander"), xfail("linalg.vecdot"), xfail("linspace"), xfail("linspace", "tensor_overload"), xfail("log_normal"), xfail("logcumsumexp"), xfail("logdet"), xfail("logspace"), xfail("logspace", "tensor_overload"), xfail("logsumexp"), xfail("lu"), xfail("lu_solve"), xfail("lu_unpack"), xfail("masked_fill"), xfail("masked_scatter"), xfail("masked_select"), xfail("masked.amax"), xfail("masked.amin"), xfail("masked.argmax"), xfail("masked.argmin"), xfail("masked.cumprod"), xfail("masked.cumsum"), xfail("masked.logsumexp"), xfail("masked.median"), xfail("matrix_exp"), xfail("max", "binary"), xfail("max", "reduction_with_dim"), xfail("maximum"), xfail("median"), xfail("min", "binary"), xfail("min", "reduction_with_dim"), xfail("minimum"), xfail("mode"), xfail("msort"), xfail("multinomial"), xfail("mv"), xfail("max_pool2d_with_indices_backward", ""), xfail("nanmean"), xfail("nanmedian"), xfail("nanquantile"), xfail("nansum"), xfail("native_batch_norm"), xfail("native_dropout_backward"), xfail("narrow_copy"), xfail("ne"), xfail("new_empty"), xfail("new_empty_strided"), xfail("transpose"), xfail("nn.functional.adaptive_avg_pool1d"), xfail("nn.functional.adaptive_avg_pool2d"), xfail("nn.functional.adaptive_avg_pool3d"), xfail("nn.functional.adaptive_max_pool1d"), xfail("nn.functional.adaptive_max_pool2d"), xfail("nn.functional.adaptive_max_pool3d"), xfail("nn.functional.alpha_dropout"), xfail("nn.functional.avg_pool1d"), xfail("nn.functional.avg_pool2d"), xfail("nn.functional.avg_pool3d"), xfail("nn.functional.batch_norm"), xfail("nn.functional.batch_norm", "without_cudnn"), xfail("nn.functional.bilinear"), xfail("nn.functional.binary_cross_entropy"), xfail("nn.functional.binary_cross_entropy_with_logits"), xfail("nn.functional.celu"), xfail("nn.functional.conv1d"), xfail("nn.functional.conv2d"), xfail("nn.functional.conv3d"), xfail("nn.functional.conv_transpose1d"), xfail("nn.functional.conv_transpose2d"), xfail("nn.functional.conv_transpose3d"), xfail("nn.functional.cosine_similarity"), xfail("nn.functional.ctc_loss"), xfail("nn.functional.dropout"), xfail("nn.functional.dropout2d"), xfail("nn.functional.dropout3d"), xfail("nn.functional.elu"), xfail("nn.functional.fractional_max_pool2d"), xfail("nn.functional.fractional_max_pool3d"), xfail("nn.functional.glu"), xfail("nn.functional.grid_sample"), xfail("nn.functional.group_norm"), xfail("nn.functional.hardshrink"), xfail("nn.functional.hardsigmoid"), xfail("nn.functional.hardswish"), xfail("nn.functional.hardtanh"), xfail("nn.functional.huber_loss"), xfail("nn.functional.instance_norm"), xfail("nn.functional.interpolate", "area"), xfail("nn.functional.interpolate", "bicubic"), xfail("nn.functional.interpolate", "bilinear"), xfail("nn.functional.interpolate", "linear"), xfail("nn.functional.interpolate", "nearest"), 
xfail("nn.functional.interpolate", "nearest-exact"), xfail("nn.functional.interpolate", "trilinear"), xfail("nn.functional.leaky_relu"), xfail("nn.functional.linear"), xfail("nn.functional.local_response_norm"), xfail("nn.functional.logsigmoid"), xfail("nn.functional.margin_ranking_loss"), xfail("nn.functional.max_pool1d"), xfail("nn.functional.max_pool2d"), xfail("nn.functional.max_pool3d"), xfail("nn.functional.max_unpool1d"), xfail("nn.functional.max_unpool1d", "grad"), xfail("nn.functional.max_unpool2d"), xfail("nn.functional.max_unpool2d", "grad"), xfail("nn.functional.max_unpool3d"), xfail("nn.functional.max_unpool3d", "grad"), xfail("nn.functional.mish"), xfail("nn.functional.mse_loss"), xfail("nn.functional.multi_margin_loss"), xfail("nn.functional.multi_head_attention_forward"), xfail("nn.functional.multilabel_margin_loss"), xfail("nn.functional.multilabel_soft_margin_loss"), xfail("nn.functional.normalize"), xfail("nn.functional.pad", "constant"), xfail("nn.functional.pad", "reflect"), xfail("nn.functional.pad", "replicate"), xfail("nn.functional.pad", "replicate_negative"), xfail("nn.functional.pairwise_distance"), xfail("nn.functional.pdist"), xfail("nn.functional.pixel_shuffle"), xfail("nn.functional.pixel_unshuffle"), xfail("nn.functional.prelu"), xfail("nn.functional.relu6"), xfail("nn.functional.rrelu"), xfail("nn.functional.selu"), xfail("nn.functional.smooth_l1_loss"), xfail("nn.functional.soft_margin_loss"), xfail("nn.functional.softplus"), xfail("nn.functional.softshrink"), xfail("nn.functional.threshold"), xfail("nn.functional.triplet_margin_loss"), xfail("nn.functional.triplet_margin_with_distance_loss"), xfail("nn.functional.unfold"), xfail("nn.functional.upsample_bilinear"), xfail("nn.functional.upsample_nearest"), xfail("nonzero"), xfail("normal"), xfail("normal", "number_mean"), xfail("normal", "in_place"), xfail("ormqr"), xfail("ones"), xfail("pca_lowrank"), xfail("pinverse"), xfail("polar"), xfail("put"), xfail("quantile"), xfail("rand_like"), xfail("randint_like"), xfail("randint"), xfail("randn"), xfail("randn_like"), xfail("renorm"), xfail("repeat_interleave"), xfail("resize_"), xfail("resize_as_"), xfail("roll"), xfail("rot90"), xfail("rsub"), xfail("scalar_tensor"), xfail("scatter_add"), xfail("scatter_reduce", "amax"), xfail("scatter_reduce", "amin"), xfail("scatter_reduce", "mean"), xfail("scatter_reduce", "prod"), xfail("scatter_reduce", "sum"), xfail("searchsorted"), xfail("select"), xfail("select_scatter"), xfail("sort"), xfail("sparse.sampled_addmm"), xfail("sparse.mm", "reduce"), xfail("special.airy_ai"), xfail("special.bessel_j0"), xfail("special.bessel_j1"), xfail("special.bessel_y0"), xfail("special.bessel_y1"), xfail("special.chebyshev_polynomial_t"), xfail("special.chebyshev_polynomial_u"), xfail("special.entr"), xfail("special.erfcx"), xfail("special.hermite_polynomial_h"), xfail("special.hermite_polynomial_he"), xfail("special.i0e"), xfail("special.i1"), xfail("special.i1e"), xfail("special.laguerre_polynomial_l"), xfail("special.log_ndtr"), xfail("special.modified_bessel_i0"), xfail("special.modified_bessel_i1"), xfail("special.modified_bessel_k0"), xfail("special.modified_bessel_k1"), xfail("special.ndtri"), xfail("special.scaled_modified_bessel_k0"), xfail("special.scaled_modified_bessel_k1"), xfail("special.spherical_bessel_j0"), xfail("special.xlog1py"), xfail("special.zeta"), xfail("squeeze", "multiple"), xfail("signal.windows.bartlett"), xfail("signal.windows.blackman"), xfail("signal.windows.cosine"), 
xfail("signal.windows.exponential"), xfail("signal.windows.gaussian"), xfail("signal.windows.general_cosine"), xfail("signal.windows.general_hamming"), xfail("signal.windows.hamming"), xfail("signal.windows.hann"), xfail("signal.windows.nuttall"), xfail("signal.windows.kaiser"), xfail("stack"), xfail("std"), xfail("std", "unbiased"), xfail("std_mean"), xfail("std_mean", "unbiased"), xfail("stft"), xfail("svd_lowrank"), xfail("t_copy"), xfail("take"), xfail("tensor_split"), xfail("to_sparse"), xfail("trace"), xfail("trapezoid"), xfail("trapz"), xfail("triangular_solve"), xfail("unbind"), xfail("unfold"), xfail("unfold_copy"), xfail("uniform"), xfail("unflatten"), xfail("unique_consecutive"), xfail("unique"), xfail("unsafe_split"), xfail("unsafe_chunk"), xfail("_unsafe_masked_index"), xfail("_unsafe_masked_index_put_accumulate"), xfail("var_mean"), xfail("var_mean", "unbiased"), xfail("vdot"), xfail("view_copy"), xfail("zeros"), # ops inside this might even fail without dtensor # tests, as we rescale op db common test size factor (i.e. L, M, S) # which triggered the original function run failures with input # generation becomes wrong, we skip them for now but should enable later. # TODO: need to clean this list and remove all cases skip("argwhere"), skip("cumprod"), skip("__rmatmul__"), skip("meshgrid", "list_of_tensors"), skip("meshgrid", "variadic_tensors"), skip("nn.functional.scaled_dot_product_attention"), skip("nn.functional.softmin"), skip("nn.functional.embedding"), skip("nn.functional.embedding_bag"), skip("nn.functional.feature_alpha_dropout", "with_train"), skip("nn.functional.feature_alpha_dropout", "without_train"), skip("nn.functional.hinge_embedding_loss"), skip("nn.functional.cosine_embedding_loss"), skip("fft.hfft"), skip("fft.hfft2"), skip("fft.hfft2"), skip("fft.hfftn"), skip("fft.ifftn"), skip("fft.irfft"), skip("istft"), skip("isclose"), skip("isreal"), skip("matmul"), skip("masked.mean"), skip("masked.var"), skip("masked.std"), skip("masked.normalize"), skip("prod"), skip("_segment_reduce", "lengths"), skip("_segment_reduce", "offsets"), # TODO: fix the following ops skip("squeeze"), } # Add a list of ops that are currently failing BW pass skip_bw = [ None, # corresponds to the transpose ops 'H' and 'T' "torch.bucketize", "torch.conj_physical", "torch.eq", "torch.isfinite", "torch.isnan", ] OP_DB_WORLD_SIZE = 4 # DEVICE_TYPE = "cuda" if torch.cuda.is_available() and torch.cuda.device_count() >= OP_DB_WORLD_SIZE else "cpu" # TODO: debug cuda illegal memory access issue and re-enable cuda tests DEVICE_TYPE = "cpu" class TestDTensorOps(DTensorOpTestBase): @property def world_size(self) -> int: return OP_DB_WORLD_SIZE # only allow float dytpe for now, we can relax this constraint # when feel necessary later (i.e when adding quantization support). 
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN") @suppress_warnings @ops(op_db, allowed_dtypes=(torch.float,)) @skipOps("TestDTensorOps", "test_dtensor_op_db", dtensor_fails) def test_dtensor_op_db(self, dtype, op): self.mesh = DeviceMesh(DEVICE_TYPE, torch.arange(self.world_size)) # test each op with dist tensor inputs and normal inputs def test(): samples = op.sample_inputs(DEVICE_TYPE, dtype, requires_grad=True) for sample_input in samples: args = [sample_input.input] + list(sample_input.args) kwargs = sample_input.kwargs self.run_dtensor_crossref(op.op, args, kwargs) # we need to figure out a way to test the out variant, out variant testing # is tricky, as we need to pre allocate the dtensor out, some of them rely # on sharding placements to be pre-known (i.e. mm.out) # if isinstance(expected, torch.Tensor) and op.supports_out: # func(*args, **kwargs, out=expected) self.check_dtensor_func(test, op) def assert_ref_dtensor_equal(self, dtensor_rs, rs): flat_dtensor_rs = pytree.tree_leaves(dtensor_rs) flat_rs = pytree.tree_leaves(rs) self.assertEqual(len(flat_dtensor_rs), len(flat_rs)) for dtensor_r, r in zip(flat_dtensor_rs, flat_rs): if not isinstance(r, torch.Tensor): continue self.assertIsInstance(dtensor_r, torch.Tensor) self.assertEqualOnRank( dtensor_r.shape, r.shape, f"Shape mismatch! original shape:{r.shape}, dtensor shape: {dtensor_r.shape}", ) self.assertEqualOnRank( dtensor_r.requires_grad, r.requires_grad, "op result requires_grad mismatch!" f"original requires_grad: {r.requires_grad}, " f"dtensor requires_grad: {dtensor_r.requires_grad}", ) self.assertEqualOnRank(dtensor_r, r) def run_dtensor_crossref(self, func, args, kwargs): to_dtensor = DTensorConverter(self.mesh, args, kwargs) def concat_res_if_necessary(func, res: object) -> object: # concat the result on corresponding dim for ops like # split, so that we can call backward on a single tensor if (resolve_name(func) is not None) and ("split" in resolve_name(func)): dim = args[2] if len(args) == 3 else 0 return torch.cat(res, dim=dim) else: return res # TODO: also handle cases where func raise an exception rs = func(*args, **kwargs) rs = concat_res_if_necessary(func, rs) def to_replicate(e: object) -> object: return e.full_tensor() if isinstance(e, DTensor) else e try: # Suppress warnings, this doesn't matter for test_meta.py # but it does matter if you want to use this decorator # for cross-ref testing, as some tests may be looking at # errors with warnings.catch_warnings(): warnings.simplefilter("ignore") # for every comb of sharding choices, we test if it works for dtensor_args, dtensor_kwargs in to_dtensor: # Only attempt if we managed to convert all tensors to DTensor # (if any of them failed, we're in a mixed tensor situation and # this is not allowed in DTensor) if to_dtensor.successful(): # Handle special cases first if there's any # Suppress warnings, this doesn't matter for test_meta.py # but it does matter if you want to use this decorator # for cross-ref testing, as some tests may be looking at # errors dtensor_rs = func(*dtensor_args, **dtensor_kwargs) # we need to skip tests containing tensors of zero elements for now. # see issue: https://github.com/pytorch/tau/issues/470 # TODO remove this once issue above fixed. 
flat_args = pytree.tree_leaves(dtensor_rs) if any( isinstance(e, torch.Tensor) and e.numel() == 0 for e in flat_args ): continue # redistribute/all_gather the results to compare with normal output dtensor_rs = tree_map(to_replicate, dtensor_rs) dtensor_rs = concat_res_if_necessary(func, dtensor_rs) try: if resolve_name(func) not in skip_bw: if isinstance(dtensor_rs, DTensor): dtensor_rs.to_local().sum().backward() elif isinstance(dtensor_rs, tuple): dtensor_rs[0].to_local().sum().backward() except Exception as e: # TODO(anj): Remove this guard exception after gaining more confidence. if torch.distributed.get_rank() == 0: print( f"failed to run BW: {resolve_name(func)}, {func}, {str(e)})" ) self.assert_ref_dtensor_equal(dtensor_rs, rs) else: raise RuntimeError( f"failed to convert args to DTensor; " f"originally (*{args}, **{kwargs})" ) except Exception as e: raise RuntimeError( f"failed to run: {resolve_name(func)}, with (*{args}, **{kwargs})" ) from e return rs def check_dtensor_func(self, test_func, opinfo, dry_run=False): try: test_func() except Exception: if not dry_run: raise if dist.get_rank() == 0: if opinfo.variant_test_name: print(f"xfail('{opinfo.name}', '{opinfo.variant_test_name}'),") else: print(f"xfail('{opinfo.name}'),") # only instantiate tests for DEVICE_TYPE alone (i.e. either CPU or GPU) instantiate_device_type_tests(TestDTensorOps, globals(), only_for=(DEVICE_TYPE,)) if __name__ == "__main__": run_tests()
import unittest import warnings import torch import torch.distributed as dist import torch.testing._internal.common_methods_invocations as common_ops from torch.distributed._tensor import DeviceMesh, DTensor, Replicate from torch.overrides import resolve_name from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import ( DecorateInfo, op_db, ) from torch.testing._internal.common_utils import ( run_tests, suppress_warnings, TEST_WITH_ASAN, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorConverter, DTensorOpTestBase, ) from torch.utils._pytree import tree_flatten, tree_map common_ops.L = 24 common_ops.M = 12 common_ops.S = 4 common_ops.XS = 2
import unittest import warnings import torch import torch.distributed as dist import torch.testing._internal.common_methods_invocations as common_ops from torch.distributed._tensor import DeviceMesh, DTensor from torch.overrides import resolve_name from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import DecorateInfo, op_db from torch.testing._internal.common_utils import ( run_tests, suppress_warnings, TEST_WITH_ASAN, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorConverter, DTensorOpTestBase, ) from torch.utils import _pytree as pytree from torch.utils._pytree import tree_map common_ops.L = 24 common_ops.M = 12 common_ops.S = 4 common_ops.XS = 2
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
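The `skipOps` helper in the record above mutates matching `OpInfo` entries in `op_db`, appending a `DecorateInfo` that carries either `unittest.expectedFailure` (for xfail entries) or `unittest.skip` (for skip entries), scoped to the named test class and method. The `xfail`/`skip` calls that populate `dtensor_fails` are assumed to be thin tuple builders defined earlier in the same test file; a minimal sketch of that shape, with the tuple layout `(op_name, variant_test_name, device_type, dtypes, expected_failure)`:

# Hedged sketch of the xfail/skip tuple builders consumed by skipOps();
# the real test file defines equivalents outside the excerpt above.
def xfail(op_name, variant_name="", *, device_type=None, dtypes=None):
    # expected_failure=True -> DecorateInfo(unittest.expectedFailure, ...)
    return (op_name, variant_name, device_type, dtypes, True)


def skip(op_name, variant_name="", *, device_type=None, dtypes=None):
    # expected_failure=False -> DecorateInfo(unittest.skip("Skipped!"), ...)
    return (op_name, variant_name, device_type, dtypes, False)


# Illustrative failure set in the same style as dtensor_fails above.
example_fails = {
    xfail("nonzero"),
    skip("matmul"),
}

With such a set in hand, `@skipOps("TestDTensorOps", "test_dtensor_op_db", example_fails)` attaches the decorators before `@ops(op_db, ...)` instantiates the per-op test variants.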
torch
test/distributed/_tensor/test_matrix_ops.py
test_scaled_dot_product_attention
if __name__ == "__main__": run_tests()
def test_scaled_dot_product_attention(self): device_mesh = DeviceMesh(self.device_type, list(range(self.world_size))) comm_mode = CommDebugMode() # bsz, n_heads, slen, head_dim query = torch.rand( (4, 8, 8, 8), device=self.device_type, dtype=torch.bfloat16, requires_grad=True, ) key = torch.rand( (4, 8, 8, 8), device=self.device_type, dtype=torch.bfloat16, requires_grad=True, ) value = torch.rand( (4, 8, 8, 8), device=self.device_type, dtype=torch.bfloat16, requires_grad=True, ) dist_query = distribute_tensor(query, device_mesh, [Shard(1)]) dist_key = distribute_tensor(key, device_mesh, [Shard(1)]) dist_value = distribute_tensor(value, device_mesh, [Shard(1)]) from torch.nn.attention import sdpa_kernel, SDPBackend available_backends = [] dropout_p = 0.0 # TODO: Add test cases where is_causal=False and an attention mask is provided. # Gaps include missing op support for aten.masked_fill_.Scalar. is_causal = True enable_gqa = False params = torch.backends.cuda.SDPAParams( query, key, value, None, dropout_p, is_causal, enable_gqa ) if torch.backends.cuda.can_use_flash_attention(params, debug=False): available_backends.append(SDPBackend.FLASH_ATTENTION) if torch.backends.cuda.can_use_efficient_attention(params, debug=False): available_backends.append(SDPBackend.EFFICIENT_ATTENTION) for backend in available_backends: with sdpa_kernel(backends=[backend]): out = F.scaled_dot_product_attention( query, key, value, dropout_p=dropout_p, is_causal=is_causal ) with comm_mode: dist_out = F.scaled_dot_product_attention( dist_query, dist_key, dist_value, dropout_p=dropout_p, is_causal=is_causal, ) self.assertEqual(comm_mode.get_total_counts(), 0) self.assertTrue(dist_out.placements[0].is_shard(dim=1)) self.assertEqual(dist_out.full_tensor(), out) out.sum().backward() with comm_mode: dist_out.sum().backward() self.assertEqual(comm_mode.get_total_counts(), 0) self.assertTrue(dist_query.grad.placements[0].is_shard(dim=1)) self.assertEqual(dist_query.grad.full_tensor(), query.grad) self.assertTrue(dist_key.grad.placements[0].is_shard(dim=1)) self.assertEqual(dist_key.grad.full_tensor(), key.grad) self.assertTrue(dist_value.grad.placements[0].is_shard(dim=1)) self.assertEqual(dist_value.grad.full_tensor(), value.grad)
import itertools from typing import cast, List, Optional import torch import torch.nn.functional as F from torch.distributed._tensor import DeviceMesh, distribute_tensor from torch.distributed._tensor.api import DTensor from torch.distributed._tensor.placement_types import ( Partial, Placement, Replicate, Shard, ) from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, skip_unless_torch_gpu, with_comms, ) class DistMatrixOpsTest(DTensorTestBase): from torch.nn.attention import sdpa_kernel, SDPBackend
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
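For context on the record above: sharding query, key, and value on the head dimension (`Shard(1)`) lets scaled dot-product attention run entirely locally on each rank, which is why the test asserts zero collectives via `CommDebugMode`. Below is a minimal sketch of that counting pattern in isolation; it assumes a process group is already initialized (e.g. inside a `DTensorTestBase` test body), and the shapes, dtype, and helper name are illustrative rather than taken from the record.

# Hedged sketch: counting collectives around a DTensor SDPA call.
# Assumes torch.distributed is initialized with `world_size` ranks.
import torch
import torch.nn.functional as F
from torch.distributed._tensor import DeviceMesh, distribute_tensor, Shard
from torch.distributed.tensor.debug import CommDebugMode


def sdpa_comm_count(device_type: str, world_size: int) -> int:
    mesh = DeviceMesh(device_type, list(range(world_size)))
    q, k, v = (
        distribute_tensor(torch.rand(4, 8, 8, 8), mesh, [Shard(1)])
        for _ in range(3)
    )
    comm_mode = CommDebugMode()
    with comm_mode:
        F.scaled_dot_product_attention(q, k, v, is_causal=True)
    # Head-dim sharding keeps attention local, so 0 is the expected count.
    return comm_mode.get_total_counts()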
torch
test/distributed/_tensor/test_op_strategy.py
test_batch_dims
def test_batch_dims(self): equation = "abc,abc->abc" input_dims, output_dim = EinsumDims.parse_equation(equation) edims = EinsumDims.parse_dims(input_dims, output_dim) self.assertEqual(edims.batch_dims, ["a", "b", "c"]) self.assertEqual(edims.contracting_dims, []) self.assertEqual(edims.lhs_out_only_dims, []) self.assertEqual(edims.rhs_out_only_dims, [])
from itertools import chain import torch from torch.distributed._tensor import DeviceMesh, DTensor from torch.distributed._tensor.placement_types import ( DTensorSpec, Partial, Replicate, Shard, TensorMeta, ) from torch.distributed.tensor._collective_utils import redistribute_cost from torch.distributed.tensor._op_schema import OpSchema, OpStrategy, PlacementStrategy from torch.distributed.tensor._ops._einsum_strategy import ( EinsumDims, gen_einsum_strategies, ) from torch.testing._internal.common_utils import run_tests, TestCase from torch.testing._internal.distributed._tensor.common_dtensor import DTensorOpTestBase class TestEinsumDims(TestCase): from torch.distributed.tensor._ops._matrix_ops import addmm_strategy from torch.distributed.tensor._ops._matrix_ops import mm_strategy from torch.distributed.tensor._ops._matrix_ops import bmm_strategy
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
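The `EinsumDims` records here (this one and the three that follow) all exercise the same two-step API: `parse_equation` splits an einsum equation into the per-input dim lists plus the output dims, and `parse_dims` classifies each dim as batch, contracting, or lhs/rhs-output-only. A small sketch of that flow on an equation not covered by these tests; the classification comments follow the same rules the tests assert:

# Hedged sketch mirroring the parse_equation/parse_dims usage in these records.
from torch.distributed.tensor._ops._einsum_strategy import EinsumDims

input_dims, output_dim = EinsumDims.parse_equation("abmk,abkn->abmn")
edims = EinsumDims.parse_dims(input_dims, output_dim)
# batch_dims        -> ["a", "b"]  (present in every operand and the output)
# contracting_dims  -> ["k"]       (present in the inputs, absent from the output)
# lhs_out_only_dims -> ["m"]
# rhs_out_only_dims -> ["n"]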
torch
test/distributed/_tensor/test_op_strategy.py
test_mm_dims
def test_mm_dims(self): equation = "mk,kn->mn" input_dims, output_dim = EinsumDims.parse_equation(equation) edims = EinsumDims.parse_dims(input_dims, output_dim) self.assertEqual(edims.batch_dims, []) self.assertEqual(edims.contracting_dims, ["k"]) self.assertEqual(edims.lhs_out_only_dims, ["m"]) self.assertEqual(edims.rhs_out_only_dims, ["n"])
from itertools import chain import torch from torch.distributed._tensor import DeviceMesh, DTensor from torch.distributed._tensor.placement_types import ( DTensorSpec, Partial, Replicate, Shard, TensorMeta, ) from torch.distributed.tensor._collective_utils import redistribute_cost from torch.distributed.tensor._op_schema import OpSchema, OpStrategy, PlacementStrategy from torch.distributed.tensor._ops._einsum_strategy import ( EinsumDims, gen_einsum_strategies, ) from torch.testing._internal.common_utils import run_tests, TestCase from torch.testing._internal.distributed._tensor.common_dtensor import DTensorOpTestBase class TestEinsumDims(TestCase): from torch.distributed.tensor._ops._matrix_ops import addmm_strategy from torch.distributed.tensor._ops._matrix_ops import mm_strategy from torch.distributed.tensor._ops._matrix_ops import bmm_strategy
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_op_strategy.py
test_bmm_dims
def test_bmm_dims(self): equation = "bmk,bkn->bmn" input_dims, output_dim = EinsumDims.parse_equation(equation) edims = EinsumDims.parse_dims(input_dims, output_dim) self.assertEqual(edims.batch_dims, ["b"]) self.assertEqual(edims.contracting_dims, ["k"]) self.assertEqual(edims.lhs_out_only_dims, ["m"]) self.assertEqual(edims.rhs_out_only_dims, ["n"]) equation = "bcmk,bckn->bcmn" input_dims, output_dim = EinsumDims.parse_equation(equation) edims = EinsumDims.parse_dims(input_dims, output_dim) self.assertEqual(edims.batch_dims, ["b", "c"]) self.assertEqual(edims.contracting_dims, ["k"]) self.assertEqual(edims.lhs_out_only_dims, ["m"]) self.assertEqual(edims.rhs_out_only_dims, ["n"])
from itertools import chain import torch from torch.distributed._tensor import DeviceMesh, DTensor from torch.distributed._tensor.placement_types import ( DTensorSpec, Partial, Replicate, Shard, TensorMeta, ) from torch.distributed.tensor._collective_utils import redistribute_cost from torch.distributed.tensor._op_schema import OpSchema, OpStrategy, PlacementStrategy from torch.distributed.tensor._ops._einsum_strategy import ( EinsumDims, gen_einsum_strategies, ) from torch.testing._internal.common_utils import run_tests, TestCase from torch.testing._internal.distributed._tensor.common_dtensor import DTensorOpTestBase class TestEinsumDims(TestCase): from torch.distributed.tensor._ops._matrix_ops import addmm_strategy from torch.distributed.tensor._ops._matrix_ops import mm_strategy from torch.distributed.tensor._ops._matrix_ops import bmm_strategy
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_tensor/test_op_strategy.py
test_free_dims
def test_free_dims(self): equation = "abc,ab->abc" input_dims, output_dim = EinsumDims.parse_equation(equation) edims = EinsumDims.parse_dims(input_dims, output_dim) self.assertEqual(edims.batch_dims, ["a", "b"]) self.assertEqual(edims.contracting_dims, []) self.assertEqual(edims.lhs_out_only_dims, ["c"]) self.assertEqual(edims.rhs_out_only_dims, []) equation = "abd,bf->abfd" input_dims, output_dim = EinsumDims.parse_equation(equation) edims = EinsumDims.parse_dims(input_dims, output_dim) self.assertEqual(edims.batch_dims, ["b"]) self.assertEqual(edims.contracting_dims, []) self.assertEqual(edims.lhs_out_only_dims, ["a", "d"]) self.assertEqual(edims.rhs_out_only_dims, ["f"])
from itertools import chain import torch from torch.distributed._tensor import DeviceMesh, DTensor from torch.distributed._tensor.placement_types import ( DTensorSpec, Partial, Replicate, Shard, TensorMeta, ) from torch.distributed.tensor._collective_utils import redistribute_cost from torch.distributed.tensor._op_schema import OpSchema, OpStrategy, PlacementStrategy from torch.distributed.tensor._ops._einsum_strategy import ( EinsumDims, gen_einsum_strategies, ) from torch.testing._internal.common_utils import run_tests, TestCase from torch.testing._internal.distributed._tensor.common_dtensor import DTensorOpTestBase class TestEinsumDims(TestCase): from torch.distributed.tensor._ops._matrix_ops import addmm_strategy from torch.distributed.tensor._ops._matrix_ops import mm_strategy from torch.distributed.tensor._ops._matrix_ops import bmm_strategy
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added