Column           Type               Values / length range
library          string (classes)   1 value
test_file        string (classes)   785 values
test_function    string (lengths)   1 to 295 characters
before           string (lengths)   0 to 448k characters
after            string (lengths)   0 to 487k characters
context_before   string (classes)   947 values
context_after    string (lengths)   0 to 16.3k characters
commit_before    string (classes)   1 value
commit_after     string (classes)   1 value
change_type      string (classes)   3 values
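The ten columns above form one record per edited test function: the library and test file, the function name, the flattened source before and after the change, the surrounding import context, the two commit hashes, and a change_type taking one of three values (added, deleted, modified, as seen in the rows below). A minimal sketch of loading and slicing such a table, assuming it is distributed as a Hugging Face `datasets` dataset; the repository id below is a hypothetical placeholder, not the real dataset name:

from collections import Counter

from datasets import load_dataset

# "your-org/pytorch-test-edits" is a stand-in; substitute the actual repository id.
ds = load_dataset("your-org/pytorch-test-edits", split="train")

# change_type takes 3 values in this table: "added", "deleted", "modified".
print(Counter(ds["change_type"]))

# All rows that touch a single test file.
utils_rows = ds.filter(lambda row: row["test_file"] == "test/cpp_api_parity/utils.py")
print(len(utils_rows), "rows for test/cpp_api_parity/utils.py")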
torch
test/custom_operator/test_custom_classes.py
f
def f():
    val = torch.classes._TorchScriptTesting._Foo(5, 3)
    return val.info()

self.assertEqual(*test_equality(f, lambda x: x))
import unittest import torch from torch import ops import torch.jit as jit import glob import os from torch.testing._internal.common_utils import TestCase, run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/custom_operator/test_custom_ops.py
test_op_with_no_abstract_impl_pystub
def test_op_with_no_abstract_impl_pystub(self):
    x = torch.randn(3, device="meta")
    if utils.requires_set_python_module():
        with self.assertRaisesRegex(RuntimeError, "pointwise"):
            torch.ops.custom.tan(x)
    else:
        # Smoketest
        torch.ops.custom.tan(x)
import os.path import sys import tempfile import unittest from model import get_custom_op_library_path, Model import torch import torch._library.utils as utils from torch import ops from torch.testing._internal.common_utils import IS_WINDOWS, run_tests, TestCase class TestCustomOperators(TestCase): from functorch import make_fx
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/cpp_api_parity/utils.py
compute_temp_file_path
def compute_temp_file_path(cpp_tmp_folder, variant_name, file_suffix):
    return os.path.join(cpp_tmp_folder, '{}_{}.pt'.format(variant_name, file_suffix))
def compute_temp_file_path(cpp_tmp_folder, variant_name, file_suffix):
    return os.path.join(cpp_tmp_folder, f"{variant_name}_{file_suffix}.pt")
from collections import namedtuple import unittest import os import warnings import shutil import torch import torch.utils.cpp_extension import torch.testing._internal.common_nn as common_nn from torch.testing._internal.common_cuda import TEST_CUDA TorchNNModuleTestParams = namedtuple( 'TorchNNModuleTestParams', [ # NN module name (e.g. "BCELoss") 'module_name', # Unique identifier for this module config (e.g. "BCELoss_weights_cuda") 'module_variant_name', # An instance of an NN test class (e.g. `CriterionTest`) which stores # necessary information (e.g. input / target / extra_args) for running the Python test 'test_instance', # Constructor arguments passed to the C++ module constructor, which must be # strictly equivalent to the Python module constructor arguments # (e.g. `torch::nn::BCELossOptions().weight(torch::rand(10))`, # which is strictly equivalent to passing `torch.rand(10)` to `torch.nn.BCELoss` # constructor in Python) 'cpp_constructor_args', # All arguments used in NN module's forward pass. # Please see `compute_arg_dict` function for details on how we construct this dict. # (e.g. # ``` # arg_dict = { # 'input': [python_input_tensor], # 'target': [python_target_tensor], # 'extra_args': [], # 'other': [], # } # ``` # ) 'arg_dict', # Whether we expect this NN module test to pass the Python/C++ parity test # (e.g. `True`) 'has_parity', # Device (e.g. "cuda") 'device', # Temporary folder to store C++ outputs (to be compared with Python outputs later) 'cpp_tmp_folder', ] ) TorchNNFunctionalTestParams = namedtuple( 'TorchNNFunctionalTestParams', [ # NN functional name (e.g. "binary_cross_entropy") 'functional_name', # Unique identifier for this functional config (e.g. "BCELoss_no_reduce_cuda") 'functional_variant_name', # An instance of an NN test class (e.g. `NewModuleTest`) which stores # necessary information (e.g. input / target / extra_args) for running the Python test 'test_instance', # The C++ function call that is strictly equivalent to the Python function call # (e.g. "F::binary_cross_entropy( # i, t.to(i.options()),F::BinaryCrossEntropyFuncOptions().reduction(torch::kNone))", # which is strictly equivalent to `F.binary_cross_entropy(i, t.type_as(i), reduction='none')` in Python) 'cpp_function_call', # All arguments used in NN functional's function call. # Please see `compute_arg_dict` function for details on how we construct this dict. # (e.g. # ``` # arg_dict = { # 'input': [python_input_tensor], # 'target': [python_target_tensor], # 'extra_args': [], # 'other': [], # } # ``` # ) 'arg_dict', # Whether we expect this NN functional test to pass the Python/C++ parity test # (e.g. `True`) 'has_parity', # Device (e.g. "cuda") 'device', # Temporary folder to store C++ outputs (to be compared with Python outputs later) 'cpp_tmp_folder', ] ) CppArg = namedtuple('CppArg', ['name', 'value']) TORCH_NN_COMMON_TEST_HARNESS = """ #include <torch/script.h> void write_ivalue_to_file(const torch::IValue& ivalue, const std::string& file_path) { auto bytes = torch::jit::pickle_save(ivalue); std::ofstream fout(file_path, std::ios::out | std::ios::binary); fout.write(bytes.data(), bytes.size()); fout.close(); } c10::Dict<std::string, torch::Tensor> load_dict_from_file(const std::string& file_path) { c10::Dict<std::string, torch::Tensor> arg_dict; auto arg_dict_module = torch::jit::load(file_path); for (const auto& p : arg_dict_module.named_buffers(/*recurse=*/false)) { arg_dict.insert(p.name, p.value); } return arg_dict; } // Generates rand tensor with non-equal values. 
This ensures that duplicate // values won't be causing test failure for modules like MaxPooling. // size should be small, otherwise randperm fails / long overflows. torch::Tensor _rand_tensor_non_equal(torch::IntArrayRef size) { int64_t total = 1; for (int64_t elem : size) { total *= elem; } return torch::randperm(total).view(size).to(torch::kDouble); } """
import os import shutil import unittest import warnings from collections import namedtuple import torch import torch.testing._internal.common_nn as common_nn import torch.utils.cpp_extension from torch.testing._internal.common_cuda import TEST_CUDA TorchNNModuleTestParams = namedtuple( "TorchNNModuleTestParams", [ # NN module name (e.g. "BCELoss") "module_name", # Unique identifier for this module config (e.g. "BCELoss_weights_cuda") "module_variant_name", # An instance of an NN test class (e.g. `CriterionTest`) which stores # necessary information (e.g. input / target / extra_args) for running the Python test "test_instance", # Constructor arguments passed to the C++ module constructor, which must be # strictly equivalent to the Python module constructor arguments # (e.g. `torch::nn::BCELossOptions().weight(torch::rand(10))`, # which is strictly equivalent to passing `torch.rand(10)` to `torch.nn.BCELoss` # constructor in Python) "cpp_constructor_args", # All arguments used in NN module's forward pass. # Please see `compute_arg_dict` function for details on how we construct this dict. # (e.g. # ``` # arg_dict = { # 'input': [python_input_tensor], # 'target': [python_target_tensor], # 'extra_args': [], # 'other': [], # } # ``` # ) "arg_dict", # Whether we expect this NN module test to pass the Python/C++ parity test # (e.g. `True`) "has_parity", # Device (e.g. "cuda") "device", # Temporary folder to store C++ outputs (to be compared with Python outputs later) "cpp_tmp_folder", ], ) TorchNNFunctionalTestParams = namedtuple( "TorchNNFunctionalTestParams", [ # NN functional name (e.g. "binary_cross_entropy") "functional_name", # Unique identifier for this functional config (e.g. "BCELoss_no_reduce_cuda") "functional_variant_name", # An instance of an NN test class (e.g. `NewModuleTest`) which stores # necessary information (e.g. input / target / extra_args) for running the Python test "test_instance", # The C++ function call that is strictly equivalent to the Python function call # (e.g. "F::binary_cross_entropy( # i, t.to(i.options()),F::BinaryCrossEntropyFuncOptions().reduction(torch::kNone))", # which is strictly equivalent to `F.binary_cross_entropy(i, t.type_as(i), reduction='none')` in Python) "cpp_function_call", # All arguments used in NN functional's function call. # Please see `compute_arg_dict` function for details on how we construct this dict. # (e.g. # ``` # arg_dict = { # 'input': [python_input_tensor], # 'target': [python_target_tensor], # 'extra_args': [], # 'other': [], # } # ``` # ) "arg_dict", # Whether we expect this NN functional test to pass the Python/C++ parity test # (e.g. `True`) "has_parity", # Device (e.g. "cuda") "device", # Temporary folder to store C++ outputs (to be compared with Python outputs later) "cpp_tmp_folder", ], ) CppArg = namedtuple("CppArg", ["name", "value"]) TORCH_NN_COMMON_TEST_HARNESS = """ #include <torch/script.h> void write_ivalue_to_file(const torch::IValue& ivalue, const std::string& file_path) { auto bytes = torch::jit::pickle_save(ivalue); std::ofstream fout(file_path, std::ios::out | std::ios::binary); fout.write(bytes.data(), bytes.size()); fout.close(); } c10::Dict<std::string, torch::Tensor> load_dict_from_file(const std::string& file_path) { c10::Dict<std::string, torch::Tensor> arg_dict; auto arg_dict_module = torch::jit::load(file_path); for (const auto& p : arg_dict_module.named_buffers(/*recurse=*/false)) { arg_dict.insert(p.name, p.value); } return arg_dict; } // Generates rand tensor with non-equal values. 
This ensures that duplicate // values won't be causing test failure for modules like MaxPooling. // size should be small, otherwise randperm fails / long overflows. torch::Tensor _rand_tensor_non_equal(torch::IntArrayRef size) { int64_t total = 1; for (int64_t elem : size) { total *= elem; } return torch::randperm(total).view(size).to(torch::kDouble); } """
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
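For a modified row such as the one above, the before and after cells hold two versions of the same function at commits c263bd43 and 32f585d9. A small sketch of rendering that pair as a unified diff with the standard library; column names are taken from the schema at the top, and row access is assumed to follow the `datasets` example earlier:

import difflib

def render_row_diff(row):
    # Unified diff of the flattened "before"/"after" cells of one dataset row.
    return "\n".join(
        difflib.unified_diff(
            row["before"].splitlines(),
            row["after"].splitlines(),
            fromfile=f'{row["test_file"]}@{row["commit_before"]}',
            tofile=f'{row["test_file"]}@{row["commit_after"]}',
            lineterm="",
        )
    )

# Usage with the `ds` object from the earlier sketch:
# print(render_row_diff(next(r for r in ds if r["change_type"] == "modified")))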
torch
test/autograd/test_logging.py
test_logging
def test_logging(self, records):
    a = torch.rand(10, requires_grad=True)
    b = a.mul(2).div(3).sum()
    c = b.clone()
    torch.autograd.backward((b, c))
    self.assertEqual(len(records), 5)
    expected = [
        "CloneBackward0",
        "SumBackward0",
        "DivBackward0",
        "MulBackward0",
        "AccumulateGrad",
    ]
    for i, record in enumerate(records):
        self.assertIn(expected[i], record.getMessage())
import logging import torch from torch.testing._internal.logging_utils import LoggingTestCase, make_logging_test class TestAutogradLogging(LoggingTestCase): from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/backends/xeon/test_launch.py
test_multi_threads
def test_multi_threads(self):
    num = 0
    with subprocess.Popen(f"python -m torch.backends.xeon.run_cpu --ninstances 4 --use-default-allocator \
    --disable-iomp --disable-numactl --log-path {self._test_dir} --no-python pwd",
                          shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as p:
        for line in p.stdout.readlines():
            segs = str(line, "utf-8").strip().split("-")
            if segs[-1].strip() == "pwd":
                num += 1
    assert num == 4, "Failed to launch multiple instances for inference"
def test_multi_threads(self):
    num = 0
    with subprocess.Popen(
        f"python -m torch.backends.xeon.run_cpu --ninstances 4 --use-default-allocator \
        --disable-iomp --disable-numactl --disable-taskset --log-path {self._test_dir} --no-python pwd",
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    ) as p:
        for line in p.stdout.readlines():
            segs = str(line, "utf-8").strip().split("-")
            if segs[-1].strip() == "pwd":
                num += 1
    assert num == 4, "Failed to launch multiple instances for inference"
from torch.testing._internal.common_utils import TestCase, run_tests, IS_LINUX import shutil import subprocess import tempfile import unittest @unittest.skipIf(not IS_LINUX, "Only works on linux") class TestTorchrun(TestCase): from torch.backends.xeon.run_cpu import _CPUinfo
import shutil import subprocess import tempfile import unittest from torch.testing._internal.common_utils import IS_LINUX, run_tests, TestCase @unittest.skipIf(not IS_LINUX, "Only works on linux") class TestTorchrun(TestCase): from torch.backends.xeon.run_cpu import _CPUinfo
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/benchmark_utils/test_benchmark_utils.py
to_entry
def to_entry(fn_counts):
    return [f"{c} {fn.replace(f'/{user}/', '/test_user/')}" for c, fn in fn_counts]

artifacts = {
    "baseline_inclusive": to_entry(stats_no_data.baseline_inclusive_stats),
    "baseline_exclusive": to_entry(stats_no_data.baseline_exclusive_stats),
    "ones_no_data_inclusive": to_entry(stats_no_data.stmt_inclusive_stats),
    "ones_no_data_exclusive": to_entry(stats_no_data.stmt_exclusive_stats),
    "ones_with_data_inclusive": to_entry(stats_with_data.stmt_inclusive_stats),
    "ones_with_data_exclusive": to_entry(stats_with_data.stmt_exclusive_stats),
}

with open(CALLGRIND_ARTIFACTS, "wt") as f:
    json.dump(artifacts, f, indent=4)
def to_entry(fn_counts):
    return [f"{c} {fn.replace(f'/{user}/', '/test_user/')}" for c, fn in fn_counts]

artifacts = {
    "baseline_inclusive": to_entry(stats_no_data.baseline_inclusive_stats),
    "baseline_exclusive": to_entry(stats_no_data.baseline_exclusive_stats),
    "ones_no_data_inclusive": to_entry(stats_no_data.stmt_inclusive_stats),
    "ones_no_data_exclusive": to_entry(stats_no_data.stmt_exclusive_stats),
    "ones_with_data_inclusive": to_entry(stats_with_data.stmt_inclusive_stats),
    "ones_with_data_exclusive": to_entry(stats_with_data.stmt_exclusive_stats),
}

with open(CALLGRIND_ARTIFACTS, "w") as f:
    json.dump(artifacts, f, indent=4)
import collections import json import os import re import textwrap import timeit from typing import Any, List, Tuple import unittest import torch import torch.utils.benchmark as benchmark_utils from torch.testing._internal.common_utils import TestCase, run_tests, IS_SANDCASTLE, IS_WINDOWS, slowTest import expecttest import numpy as np from torch.utils.benchmark.utils.valgrind_wrapper.timer_interface import wrapper_singleton
import collections import json import os import re import textwrap import timeit import unittest from typing import Any, List, Tuple import expecttest import numpy as np import torch import torch.utils.benchmark as benchmark_utils from torch.testing._internal.common_utils import ( IS_SANDCASTLE, IS_WINDOWS, run_tests, slowTest, TEST_WITH_ASAN, TestCase, ) from torch.utils.benchmark.utils.valgrind_wrapper.timer_interface import ( wrapper_singleton, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/cpp_api_parity/utils.py
set_cpp_tensors_requires_grad
def set_cpp_tensors_requires_grad(cpp_tensor_stmts, python_tensors):
    assert len(cpp_tensor_stmts) == len(python_tensors)
    return ['{}.requires_grad_(true)'.format(tensor_stmt) if tensor.dtype != torch.long else tensor_stmt
            for tensor_stmt, (_, tensor) in zip(cpp_tensor_stmts, python_tensors)]
def set_cpp_tensors_requires_grad(cpp_tensor_stmts, python_tensors):
    assert len(cpp_tensor_stmts) == len(python_tensors)
    return [
        f"{tensor_stmt}.requires_grad_(true)"
        if tensor.dtype != torch.long
        else tensor_stmt
        for tensor_stmt, (_, tensor) in zip(cpp_tensor_stmts, python_tensors)
    ]
from collections import namedtuple import unittest import os import warnings import shutil import torch import torch.utils.cpp_extension import torch.testing._internal.common_nn as common_nn from torch.testing._internal.common_cuda import TEST_CUDA TorchNNModuleTestParams = namedtuple( 'TorchNNModuleTestParams', [ # NN module name (e.g. "BCELoss") 'module_name', # Unique identifier for this module config (e.g. "BCELoss_weights_cuda") 'module_variant_name', # An instance of an NN test class (e.g. `CriterionTest`) which stores # necessary information (e.g. input / target / extra_args) for running the Python test 'test_instance', # Constructor arguments passed to the C++ module constructor, which must be # strictly equivalent to the Python module constructor arguments # (e.g. `torch::nn::BCELossOptions().weight(torch::rand(10))`, # which is strictly equivalent to passing `torch.rand(10)` to `torch.nn.BCELoss` # constructor in Python) 'cpp_constructor_args', # All arguments used in NN module's forward pass. # Please see `compute_arg_dict` function for details on how we construct this dict. # (e.g. # ``` # arg_dict = { # 'input': [python_input_tensor], # 'target': [python_target_tensor], # 'extra_args': [], # 'other': [], # } # ``` # ) 'arg_dict', # Whether we expect this NN module test to pass the Python/C++ parity test # (e.g. `True`) 'has_parity', # Device (e.g. "cuda") 'device', # Temporary folder to store C++ outputs (to be compared with Python outputs later) 'cpp_tmp_folder', ] ) TorchNNFunctionalTestParams = namedtuple( 'TorchNNFunctionalTestParams', [ # NN functional name (e.g. "binary_cross_entropy") 'functional_name', # Unique identifier for this functional config (e.g. "BCELoss_no_reduce_cuda") 'functional_variant_name', # An instance of an NN test class (e.g. `NewModuleTest`) which stores # necessary information (e.g. input / target / extra_args) for running the Python test 'test_instance', # The C++ function call that is strictly equivalent to the Python function call # (e.g. "F::binary_cross_entropy( # i, t.to(i.options()),F::BinaryCrossEntropyFuncOptions().reduction(torch::kNone))", # which is strictly equivalent to `F.binary_cross_entropy(i, t.type_as(i), reduction='none')` in Python) 'cpp_function_call', # All arguments used in NN functional's function call. # Please see `compute_arg_dict` function for details on how we construct this dict. # (e.g. # ``` # arg_dict = { # 'input': [python_input_tensor], # 'target': [python_target_tensor], # 'extra_args': [], # 'other': [], # } # ``` # ) 'arg_dict', # Whether we expect this NN functional test to pass the Python/C++ parity test # (e.g. `True`) 'has_parity', # Device (e.g. "cuda") 'device', # Temporary folder to store C++ outputs (to be compared with Python outputs later) 'cpp_tmp_folder', ] ) CppArg = namedtuple('CppArg', ['name', 'value']) TORCH_NN_COMMON_TEST_HARNESS = """ #include <torch/script.h> void write_ivalue_to_file(const torch::IValue& ivalue, const std::string& file_path) { auto bytes = torch::jit::pickle_save(ivalue); std::ofstream fout(file_path, std::ios::out | std::ios::binary); fout.write(bytes.data(), bytes.size()); fout.close(); } c10::Dict<std::string, torch::Tensor> load_dict_from_file(const std::string& file_path) { c10::Dict<std::string, torch::Tensor> arg_dict; auto arg_dict_module = torch::jit::load(file_path); for (const auto& p : arg_dict_module.named_buffers(/*recurse=*/false)) { arg_dict.insert(p.name, p.value); } return arg_dict; } // Generates rand tensor with non-equal values. 
This ensures that duplicate // values won't be causing test failure for modules like MaxPooling. // size should be small, otherwise randperm fails / long overflows. torch::Tensor _rand_tensor_non_equal(torch::IntArrayRef size) { int64_t total = 1; for (int64_t elem : size) { total *= elem; } return torch::randperm(total).view(size).to(torch::kDouble); } """
import os import shutil import unittest import warnings from collections import namedtuple import torch import torch.testing._internal.common_nn as common_nn import torch.utils.cpp_extension from torch.testing._internal.common_cuda import TEST_CUDA TorchNNModuleTestParams = namedtuple( "TorchNNModuleTestParams", [ # NN module name (e.g. "BCELoss") "module_name", # Unique identifier for this module config (e.g. "BCELoss_weights_cuda") "module_variant_name", # An instance of an NN test class (e.g. `CriterionTest`) which stores # necessary information (e.g. input / target / extra_args) for running the Python test "test_instance", # Constructor arguments passed to the C++ module constructor, which must be # strictly equivalent to the Python module constructor arguments # (e.g. `torch::nn::BCELossOptions().weight(torch::rand(10))`, # which is strictly equivalent to passing `torch.rand(10)` to `torch.nn.BCELoss` # constructor in Python) "cpp_constructor_args", # All arguments used in NN module's forward pass. # Please see `compute_arg_dict` function for details on how we construct this dict. # (e.g. # ``` # arg_dict = { # 'input': [python_input_tensor], # 'target': [python_target_tensor], # 'extra_args': [], # 'other': [], # } # ``` # ) "arg_dict", # Whether we expect this NN module test to pass the Python/C++ parity test # (e.g. `True`) "has_parity", # Device (e.g. "cuda") "device", # Temporary folder to store C++ outputs (to be compared with Python outputs later) "cpp_tmp_folder", ], ) TorchNNFunctionalTestParams = namedtuple( "TorchNNFunctionalTestParams", [ # NN functional name (e.g. "binary_cross_entropy") "functional_name", # Unique identifier for this functional config (e.g. "BCELoss_no_reduce_cuda") "functional_variant_name", # An instance of an NN test class (e.g. `NewModuleTest`) which stores # necessary information (e.g. input / target / extra_args) for running the Python test "test_instance", # The C++ function call that is strictly equivalent to the Python function call # (e.g. "F::binary_cross_entropy( # i, t.to(i.options()),F::BinaryCrossEntropyFuncOptions().reduction(torch::kNone))", # which is strictly equivalent to `F.binary_cross_entropy(i, t.type_as(i), reduction='none')` in Python) "cpp_function_call", # All arguments used in NN functional's function call. # Please see `compute_arg_dict` function for details on how we construct this dict. # (e.g. # ``` # arg_dict = { # 'input': [python_input_tensor], # 'target': [python_target_tensor], # 'extra_args': [], # 'other': [], # } # ``` # ) "arg_dict", # Whether we expect this NN functional test to pass the Python/C++ parity test # (e.g. `True`) "has_parity", # Device (e.g. "cuda") "device", # Temporary folder to store C++ outputs (to be compared with Python outputs later) "cpp_tmp_folder", ], ) CppArg = namedtuple("CppArg", ["name", "value"]) TORCH_NN_COMMON_TEST_HARNESS = """ #include <torch/script.h> void write_ivalue_to_file(const torch::IValue& ivalue, const std::string& file_path) { auto bytes = torch::jit::pickle_save(ivalue); std::ofstream fout(file_path, std::ios::out | std::ios::binary); fout.write(bytes.data(), bytes.size()); fout.close(); } c10::Dict<std::string, torch::Tensor> load_dict_from_file(const std::string& file_path) { c10::Dict<std::string, torch::Tensor> arg_dict; auto arg_dict_module = torch::jit::load(file_path); for (const auto& p : arg_dict_module.named_buffers(/*recurse=*/false)) { arg_dict.insert(p.name, p.value); } return arg_dict; } // Generates rand tensor with non-equal values. 
This ensures that duplicate // values won't be causing test failure for modules like MaxPooling. // size should be small, otherwise randperm fails / long overflows. torch::Tensor _rand_tensor_non_equal(torch::IntArrayRef size) { int64_t total = 1; for (int64_t elem : size) { total *= elem; } return torch::randperm(total).view(size).to(torch::kDouble); } """
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/cpp_api_parity/utils.py
move_cpp_tensors_to_device
def move_cpp_tensors_to_device(cpp_tensor_stmts, device):
    return ['{}.to("{}")'.format(tensor_stmt, device) for tensor_stmt in cpp_tensor_stmts]
def move_cpp_tensors_to_device(cpp_tensor_stmts, device):
    return [f'{tensor_stmt}.to("{device}")' for tensor_stmt in cpp_tensor_stmts]
from collections import namedtuple import unittest import os import warnings import shutil import torch import torch.utils.cpp_extension import torch.testing._internal.common_nn as common_nn from torch.testing._internal.common_cuda import TEST_CUDA TorchNNModuleTestParams = namedtuple( 'TorchNNModuleTestParams', [ # NN module name (e.g. "BCELoss") 'module_name', # Unique identifier for this module config (e.g. "BCELoss_weights_cuda") 'module_variant_name', # An instance of an NN test class (e.g. `CriterionTest`) which stores # necessary information (e.g. input / target / extra_args) for running the Python test 'test_instance', # Constructor arguments passed to the C++ module constructor, which must be # strictly equivalent to the Python module constructor arguments # (e.g. `torch::nn::BCELossOptions().weight(torch::rand(10))`, # which is strictly equivalent to passing `torch.rand(10)` to `torch.nn.BCELoss` # constructor in Python) 'cpp_constructor_args', # All arguments used in NN module's forward pass. # Please see `compute_arg_dict` function for details on how we construct this dict. # (e.g. # ``` # arg_dict = { # 'input': [python_input_tensor], # 'target': [python_target_tensor], # 'extra_args': [], # 'other': [], # } # ``` # ) 'arg_dict', # Whether we expect this NN module test to pass the Python/C++ parity test # (e.g. `True`) 'has_parity', # Device (e.g. "cuda") 'device', # Temporary folder to store C++ outputs (to be compared with Python outputs later) 'cpp_tmp_folder', ] ) TorchNNFunctionalTestParams = namedtuple( 'TorchNNFunctionalTestParams', [ # NN functional name (e.g. "binary_cross_entropy") 'functional_name', # Unique identifier for this functional config (e.g. "BCELoss_no_reduce_cuda") 'functional_variant_name', # An instance of an NN test class (e.g. `NewModuleTest`) which stores # necessary information (e.g. input / target / extra_args) for running the Python test 'test_instance', # The C++ function call that is strictly equivalent to the Python function call # (e.g. "F::binary_cross_entropy( # i, t.to(i.options()),F::BinaryCrossEntropyFuncOptions().reduction(torch::kNone))", # which is strictly equivalent to `F.binary_cross_entropy(i, t.type_as(i), reduction='none')` in Python) 'cpp_function_call', # All arguments used in NN functional's function call. # Please see `compute_arg_dict` function for details on how we construct this dict. # (e.g. # ``` # arg_dict = { # 'input': [python_input_tensor], # 'target': [python_target_tensor], # 'extra_args': [], # 'other': [], # } # ``` # ) 'arg_dict', # Whether we expect this NN functional test to pass the Python/C++ parity test # (e.g. `True`) 'has_parity', # Device (e.g. "cuda") 'device', # Temporary folder to store C++ outputs (to be compared with Python outputs later) 'cpp_tmp_folder', ] ) CppArg = namedtuple('CppArg', ['name', 'value']) TORCH_NN_COMMON_TEST_HARNESS = """ #include <torch/script.h> void write_ivalue_to_file(const torch::IValue& ivalue, const std::string& file_path) { auto bytes = torch::jit::pickle_save(ivalue); std::ofstream fout(file_path, std::ios::out | std::ios::binary); fout.write(bytes.data(), bytes.size()); fout.close(); } c10::Dict<std::string, torch::Tensor> load_dict_from_file(const std::string& file_path) { c10::Dict<std::string, torch::Tensor> arg_dict; auto arg_dict_module = torch::jit::load(file_path); for (const auto& p : arg_dict_module.named_buffers(/*recurse=*/false)) { arg_dict.insert(p.name, p.value); } return arg_dict; } // Generates rand tensor with non-equal values. 
This ensures that duplicate // values won't be causing test failure for modules like MaxPooling. // size should be small, otherwise randperm fails / long overflows. torch::Tensor _rand_tensor_non_equal(torch::IntArrayRef size) { int64_t total = 1; for (int64_t elem : size) { total *= elem; } return torch::randperm(total).view(size).to(torch::kDouble); } """
import os import shutil import unittest import warnings from collections import namedtuple import torch import torch.testing._internal.common_nn as common_nn import torch.utils.cpp_extension from torch.testing._internal.common_cuda import TEST_CUDA TorchNNModuleTestParams = namedtuple( "TorchNNModuleTestParams", [ # NN module name (e.g. "BCELoss") "module_name", # Unique identifier for this module config (e.g. "BCELoss_weights_cuda") "module_variant_name", # An instance of an NN test class (e.g. `CriterionTest`) which stores # necessary information (e.g. input / target / extra_args) for running the Python test "test_instance", # Constructor arguments passed to the C++ module constructor, which must be # strictly equivalent to the Python module constructor arguments # (e.g. `torch::nn::BCELossOptions().weight(torch::rand(10))`, # which is strictly equivalent to passing `torch.rand(10)` to `torch.nn.BCELoss` # constructor in Python) "cpp_constructor_args", # All arguments used in NN module's forward pass. # Please see `compute_arg_dict` function for details on how we construct this dict. # (e.g. # ``` # arg_dict = { # 'input': [python_input_tensor], # 'target': [python_target_tensor], # 'extra_args': [], # 'other': [], # } # ``` # ) "arg_dict", # Whether we expect this NN module test to pass the Python/C++ parity test # (e.g. `True`) "has_parity", # Device (e.g. "cuda") "device", # Temporary folder to store C++ outputs (to be compared with Python outputs later) "cpp_tmp_folder", ], ) TorchNNFunctionalTestParams = namedtuple( "TorchNNFunctionalTestParams", [ # NN functional name (e.g. "binary_cross_entropy") "functional_name", # Unique identifier for this functional config (e.g. "BCELoss_no_reduce_cuda") "functional_variant_name", # An instance of an NN test class (e.g. `NewModuleTest`) which stores # necessary information (e.g. input / target / extra_args) for running the Python test "test_instance", # The C++ function call that is strictly equivalent to the Python function call # (e.g. "F::binary_cross_entropy( # i, t.to(i.options()),F::BinaryCrossEntropyFuncOptions().reduction(torch::kNone))", # which is strictly equivalent to `F.binary_cross_entropy(i, t.type_as(i), reduction='none')` in Python) "cpp_function_call", # All arguments used in NN functional's function call. # Please see `compute_arg_dict` function for details on how we construct this dict. # (e.g. # ``` # arg_dict = { # 'input': [python_input_tensor], # 'target': [python_target_tensor], # 'extra_args': [], # 'other': [], # } # ``` # ) "arg_dict", # Whether we expect this NN functional test to pass the Python/C++ parity test # (e.g. `True`) "has_parity", # Device (e.g. "cuda") "device", # Temporary folder to store C++ outputs (to be compared with Python outputs later) "cpp_tmp_folder", ], ) CppArg = namedtuple("CppArg", ["name", "value"]) TORCH_NN_COMMON_TEST_HARNESS = """ #include <torch/script.h> void write_ivalue_to_file(const torch::IValue& ivalue, const std::string& file_path) { auto bytes = torch::jit::pickle_save(ivalue); std::ofstream fout(file_path, std::ios::out | std::ios::binary); fout.write(bytes.data(), bytes.size()); fout.close(); } c10::Dict<std::string, torch::Tensor> load_dict_from_file(const std::string& file_path) { c10::Dict<std::string, torch::Tensor> arg_dict; auto arg_dict_module = torch::jit::load(file_path); for (const auto& p : arg_dict_module.named_buffers(/*recurse=*/false)) { arg_dict.insert(p.name, p.value); } return arg_dict; } // Generates rand tensor with non-equal values. 
This ensures that duplicate // values won't be causing test failure for modules like MaxPooling. // size should be small, otherwise randperm fails / long overflows. torch::Tensor _rand_tensor_non_equal(torch::IntArrayRef size) { int64_t total = 1; for (int64_t elem : size) { total *= elem; } return torch::randperm(total).view(size).to(torch::kDouble); } """
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/cpp_api_parity/utils.py
compute_cpp_args_construction_stmts_and_forward_arg_symbols
def compute_cpp_args_construction_stmts_and_forward_arg_symbols(test_params):
    device = test_params.device
    cpp_forward_args_symbols = []

    def add_cpp_forward_args(args):
        args_stmts = []
        for arg_name, _ in args:
            args_stmts.append('auto {} = arg_dict.at("{}")'.format(arg_name, arg_name))
            cpp_forward_args_symbols.append(arg_name)
        return args_stmts

    cpp_forward_input_args_stmts = set_cpp_tensors_requires_grad(move_cpp_tensors_to_device(
        add_cpp_forward_args(test_params.arg_dict['input']), device), test_params.arg_dict['input'])
    cpp_forward_target_args_stmts = move_cpp_tensors_to_device(
        add_cpp_forward_args(test_params.arg_dict['target']), device)
    cpp_forward_extra_args_stmts = move_cpp_tensors_to_device(
        add_cpp_forward_args(test_params.arg_dict['extra_args']), device)

    # Build the list of other arguments needed
    cpp_other_args_stmts = []
    for arg_name, _ in test_params.arg_dict['other']:
        cpp_other_args_stmts.append('auto {} = arg_dict.at("{}")'.format(arg_name, arg_name))
    cpp_other_args_stmts = move_cpp_tensors_to_device(cpp_other_args_stmts, device)

    cpp_args_construction_stmts = cpp_forward_input_args_stmts + cpp_forward_target_args_stmts + \
        cpp_forward_extra_args_stmts + cpp_other_args_stmts

    return cpp_args_construction_stmts, cpp_forward_args_symbols
def compute_cpp_args_construction_stmts_and_forward_arg_symbols(test_params):
    device = test_params.device
    cpp_forward_args_symbols = []

    def add_cpp_forward_args(args):
        args_stmts = []
        for arg_name, _ in args:
            args_stmts.append(f'auto {arg_name} = arg_dict.at("{arg_name}")')
            cpp_forward_args_symbols.append(arg_name)
        return args_stmts

    cpp_forward_input_args_stmts = set_cpp_tensors_requires_grad(
        move_cpp_tensors_to_device(
            add_cpp_forward_args(test_params.arg_dict["input"]), device
        ),
        test_params.arg_dict["input"],
    )
    cpp_forward_target_args_stmts = move_cpp_tensors_to_device(
        add_cpp_forward_args(test_params.arg_dict["target"]), device
    )
    cpp_forward_extra_args_stmts = move_cpp_tensors_to_device(
        add_cpp_forward_args(test_params.arg_dict["extra_args"]), device
    )

    # Build the list of other arguments needed
    cpp_other_args_stmts = []
    for arg_name, _ in test_params.arg_dict["other"]:
        cpp_other_args_stmts.append(f'auto {arg_name} = arg_dict.at("{arg_name}")')
    cpp_other_args_stmts = move_cpp_tensors_to_device(cpp_other_args_stmts, device)

    cpp_args_construction_stmts = (
        cpp_forward_input_args_stmts
        + cpp_forward_target_args_stmts
        + cpp_forward_extra_args_stmts
        + cpp_other_args_stmts
    )

    return cpp_args_construction_stmts, cpp_forward_args_symbols
from collections import namedtuple import unittest import os import warnings import shutil import torch import torch.utils.cpp_extension import torch.testing._internal.common_nn as common_nn from torch.testing._internal.common_cuda import TEST_CUDA TorchNNModuleTestParams = namedtuple( 'TorchNNModuleTestParams', [ # NN module name (e.g. "BCELoss") 'module_name', # Unique identifier for this module config (e.g. "BCELoss_weights_cuda") 'module_variant_name', # An instance of an NN test class (e.g. `CriterionTest`) which stores # necessary information (e.g. input / target / extra_args) for running the Python test 'test_instance', # Constructor arguments passed to the C++ module constructor, which must be # strictly equivalent to the Python module constructor arguments # (e.g. `torch::nn::BCELossOptions().weight(torch::rand(10))`, # which is strictly equivalent to passing `torch.rand(10)` to `torch.nn.BCELoss` # constructor in Python) 'cpp_constructor_args', # All arguments used in NN module's forward pass. # Please see `compute_arg_dict` function for details on how we construct this dict. # (e.g. # ``` # arg_dict = { # 'input': [python_input_tensor], # 'target': [python_target_tensor], # 'extra_args': [], # 'other': [], # } # ``` # ) 'arg_dict', # Whether we expect this NN module test to pass the Python/C++ parity test # (e.g. `True`) 'has_parity', # Device (e.g. "cuda") 'device', # Temporary folder to store C++ outputs (to be compared with Python outputs later) 'cpp_tmp_folder', ] ) TorchNNFunctionalTestParams = namedtuple( 'TorchNNFunctionalTestParams', [ # NN functional name (e.g. "binary_cross_entropy") 'functional_name', # Unique identifier for this functional config (e.g. "BCELoss_no_reduce_cuda") 'functional_variant_name', # An instance of an NN test class (e.g. `NewModuleTest`) which stores # necessary information (e.g. input / target / extra_args) for running the Python test 'test_instance', # The C++ function call that is strictly equivalent to the Python function call # (e.g. "F::binary_cross_entropy( # i, t.to(i.options()),F::BinaryCrossEntropyFuncOptions().reduction(torch::kNone))", # which is strictly equivalent to `F.binary_cross_entropy(i, t.type_as(i), reduction='none')` in Python) 'cpp_function_call', # All arguments used in NN functional's function call. # Please see `compute_arg_dict` function for details on how we construct this dict. # (e.g. # ``` # arg_dict = { # 'input': [python_input_tensor], # 'target': [python_target_tensor], # 'extra_args': [], # 'other': [], # } # ``` # ) 'arg_dict', # Whether we expect this NN functional test to pass the Python/C++ parity test # (e.g. `True`) 'has_parity', # Device (e.g. "cuda") 'device', # Temporary folder to store C++ outputs (to be compared with Python outputs later) 'cpp_tmp_folder', ] ) CppArg = namedtuple('CppArg', ['name', 'value']) TORCH_NN_COMMON_TEST_HARNESS = """ #include <torch/script.h> void write_ivalue_to_file(const torch::IValue& ivalue, const std::string& file_path) { auto bytes = torch::jit::pickle_save(ivalue); std::ofstream fout(file_path, std::ios::out | std::ios::binary); fout.write(bytes.data(), bytes.size()); fout.close(); } c10::Dict<std::string, torch::Tensor> load_dict_from_file(const std::string& file_path) { c10::Dict<std::string, torch::Tensor> arg_dict; auto arg_dict_module = torch::jit::load(file_path); for (const auto& p : arg_dict_module.named_buffers(/*recurse=*/false)) { arg_dict.insert(p.name, p.value); } return arg_dict; } // Generates rand tensor with non-equal values. 
This ensures that duplicate // values won't be causing test failure for modules like MaxPooling. // size should be small, otherwise randperm fails / long overflows. torch::Tensor _rand_tensor_non_equal(torch::IntArrayRef size) { int64_t total = 1; for (int64_t elem : size) { total *= elem; } return torch::randperm(total).view(size).to(torch::kDouble); } """
import os import shutil import unittest import warnings from collections import namedtuple import torch import torch.testing._internal.common_nn as common_nn import torch.utils.cpp_extension from torch.testing._internal.common_cuda import TEST_CUDA TorchNNModuleTestParams = namedtuple( "TorchNNModuleTestParams", [ # NN module name (e.g. "BCELoss") "module_name", # Unique identifier for this module config (e.g. "BCELoss_weights_cuda") "module_variant_name", # An instance of an NN test class (e.g. `CriterionTest`) which stores # necessary information (e.g. input / target / extra_args) for running the Python test "test_instance", # Constructor arguments passed to the C++ module constructor, which must be # strictly equivalent to the Python module constructor arguments # (e.g. `torch::nn::BCELossOptions().weight(torch::rand(10))`, # which is strictly equivalent to passing `torch.rand(10)` to `torch.nn.BCELoss` # constructor in Python) "cpp_constructor_args", # All arguments used in NN module's forward pass. # Please see `compute_arg_dict` function for details on how we construct this dict. # (e.g. # ``` # arg_dict = { # 'input': [python_input_tensor], # 'target': [python_target_tensor], # 'extra_args': [], # 'other': [], # } # ``` # ) "arg_dict", # Whether we expect this NN module test to pass the Python/C++ parity test # (e.g. `True`) "has_parity", # Device (e.g. "cuda") "device", # Temporary folder to store C++ outputs (to be compared with Python outputs later) "cpp_tmp_folder", ], ) TorchNNFunctionalTestParams = namedtuple( "TorchNNFunctionalTestParams", [ # NN functional name (e.g. "binary_cross_entropy") "functional_name", # Unique identifier for this functional config (e.g. "BCELoss_no_reduce_cuda") "functional_variant_name", # An instance of an NN test class (e.g. `NewModuleTest`) which stores # necessary information (e.g. input / target / extra_args) for running the Python test "test_instance", # The C++ function call that is strictly equivalent to the Python function call # (e.g. "F::binary_cross_entropy( # i, t.to(i.options()),F::BinaryCrossEntropyFuncOptions().reduction(torch::kNone))", # which is strictly equivalent to `F.binary_cross_entropy(i, t.type_as(i), reduction='none')` in Python) "cpp_function_call", # All arguments used in NN functional's function call. # Please see `compute_arg_dict` function for details on how we construct this dict. # (e.g. # ``` # arg_dict = { # 'input': [python_input_tensor], # 'target': [python_target_tensor], # 'extra_args': [], # 'other': [], # } # ``` # ) "arg_dict", # Whether we expect this NN functional test to pass the Python/C++ parity test # (e.g. `True`) "has_parity", # Device (e.g. "cuda") "device", # Temporary folder to store C++ outputs (to be compared with Python outputs later) "cpp_tmp_folder", ], ) CppArg = namedtuple("CppArg", ["name", "value"]) TORCH_NN_COMMON_TEST_HARNESS = """ #include <torch/script.h> void write_ivalue_to_file(const torch::IValue& ivalue, const std::string& file_path) { auto bytes = torch::jit::pickle_save(ivalue); std::ofstream fout(file_path, std::ios::out | std::ios::binary); fout.write(bytes.data(), bytes.size()); fout.close(); } c10::Dict<std::string, torch::Tensor> load_dict_from_file(const std::string& file_path) { c10::Dict<std::string, torch::Tensor> arg_dict; auto arg_dict_module = torch::jit::load(file_path); for (const auto& p : arg_dict_module.named_buffers(/*recurse=*/false)) { arg_dict.insert(p.name, p.value); } return arg_dict; } // Generates rand tensor with non-equal values. 
This ensures that duplicate // values won't be causing test failure for modules like MaxPooling. // size should be small, otherwise randperm fails / long overflows. torch::Tensor _rand_tensor_non_equal(torch::IntArrayRef size) { int64_t total = 1; for (int64_t elem : size) { total *= elem; } return torch::randperm(total).view(size).to(torch::kDouble); } """
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/cpp_api_parity/utils.py
add_cpp_forward_args
def add_cpp_forward_args(args):
    args_stmts = []
    for arg_name, _ in args:
        args_stmts.append('auto {} = arg_dict.at("{}")'.format(arg_name, arg_name))
        cpp_forward_args_symbols.append(arg_name)
    return args_stmts

cpp_forward_input_args_stmts = set_cpp_tensors_requires_grad(move_cpp_tensors_to_device(
    add_cpp_forward_args(test_params.arg_dict['input']), device), test_params.arg_dict['input'])
cpp_forward_target_args_stmts = move_cpp_tensors_to_device(
    add_cpp_forward_args(test_params.arg_dict['target']), device)
cpp_forward_extra_args_stmts = move_cpp_tensors_to_device(
    add_cpp_forward_args(test_params.arg_dict['extra_args']), device)

# Build the list of other arguments needed
cpp_other_args_stmts = []
for arg_name, _ in test_params.arg_dict['other']:
    cpp_other_args_stmts.append('auto {} = arg_dict.at("{}")'.format(arg_name, arg_name))
cpp_other_args_stmts = move_cpp_tensors_to_device(cpp_other_args_stmts, device)

cpp_args_construction_stmts = cpp_forward_input_args_stmts + cpp_forward_target_args_stmts + \
    cpp_forward_extra_args_stmts + cpp_other_args_stmts

return cpp_args_construction_stmts, cpp_forward_args_symbols
def add_cpp_forward_args(args):
    args_stmts = []
    for arg_name, _ in args:
        args_stmts.append(f'auto {arg_name} = arg_dict.at("{arg_name}")')
        cpp_forward_args_symbols.append(arg_name)
    return args_stmts

cpp_forward_input_args_stmts = set_cpp_tensors_requires_grad(
    move_cpp_tensors_to_device(
        add_cpp_forward_args(test_params.arg_dict["input"]), device
    ),
    test_params.arg_dict["input"],
)
cpp_forward_target_args_stmts = move_cpp_tensors_to_device(
    add_cpp_forward_args(test_params.arg_dict["target"]), device
)
cpp_forward_extra_args_stmts = move_cpp_tensors_to_device(
    add_cpp_forward_args(test_params.arg_dict["extra_args"]), device
)

# Build the list of other arguments needed
cpp_other_args_stmts = []
for arg_name, _ in test_params.arg_dict["other"]:
    cpp_other_args_stmts.append(f'auto {arg_name} = arg_dict.at("{arg_name}")')
cpp_other_args_stmts = move_cpp_tensors_to_device(cpp_other_args_stmts, device)

cpp_args_construction_stmts = (
    cpp_forward_input_args_stmts
    + cpp_forward_target_args_stmts
    + cpp_forward_extra_args_stmts
    + cpp_other_args_stmts
)

return cpp_args_construction_stmts, cpp_forward_args_symbols
from collections import namedtuple import unittest import os import warnings import shutil import torch import torch.utils.cpp_extension import torch.testing._internal.common_nn as common_nn from torch.testing._internal.common_cuda import TEST_CUDA TorchNNModuleTestParams = namedtuple( 'TorchNNModuleTestParams', [ # NN module name (e.g. "BCELoss") 'module_name', # Unique identifier for this module config (e.g. "BCELoss_weights_cuda") 'module_variant_name', # An instance of an NN test class (e.g. `CriterionTest`) which stores # necessary information (e.g. input / target / extra_args) for running the Python test 'test_instance', # Constructor arguments passed to the C++ module constructor, which must be # strictly equivalent to the Python module constructor arguments # (e.g. `torch::nn::BCELossOptions().weight(torch::rand(10))`, # which is strictly equivalent to passing `torch.rand(10)` to `torch.nn.BCELoss` # constructor in Python) 'cpp_constructor_args', # All arguments used in NN module's forward pass. # Please see `compute_arg_dict` function for details on how we construct this dict. # (e.g. # ``` # arg_dict = { # 'input': [python_input_tensor], # 'target': [python_target_tensor], # 'extra_args': [], # 'other': [], # } # ``` # ) 'arg_dict', # Whether we expect this NN module test to pass the Python/C++ parity test # (e.g. `True`) 'has_parity', # Device (e.g. "cuda") 'device', # Temporary folder to store C++ outputs (to be compared with Python outputs later) 'cpp_tmp_folder', ] ) TorchNNFunctionalTestParams = namedtuple( 'TorchNNFunctionalTestParams', [ # NN functional name (e.g. "binary_cross_entropy") 'functional_name', # Unique identifier for this functional config (e.g. "BCELoss_no_reduce_cuda") 'functional_variant_name', # An instance of an NN test class (e.g. `NewModuleTest`) which stores # necessary information (e.g. input / target / extra_args) for running the Python test 'test_instance', # The C++ function call that is strictly equivalent to the Python function call # (e.g. "F::binary_cross_entropy( # i, t.to(i.options()),F::BinaryCrossEntropyFuncOptions().reduction(torch::kNone))", # which is strictly equivalent to `F.binary_cross_entropy(i, t.type_as(i), reduction='none')` in Python) 'cpp_function_call', # All arguments used in NN functional's function call. # Please see `compute_arg_dict` function for details on how we construct this dict. # (e.g. # ``` # arg_dict = { # 'input': [python_input_tensor], # 'target': [python_target_tensor], # 'extra_args': [], # 'other': [], # } # ``` # ) 'arg_dict', # Whether we expect this NN functional test to pass the Python/C++ parity test # (e.g. `True`) 'has_parity', # Device (e.g. "cuda") 'device', # Temporary folder to store C++ outputs (to be compared with Python outputs later) 'cpp_tmp_folder', ] ) CppArg = namedtuple('CppArg', ['name', 'value']) TORCH_NN_COMMON_TEST_HARNESS = """ #include <torch/script.h> void write_ivalue_to_file(const torch::IValue& ivalue, const std::string& file_path) { auto bytes = torch::jit::pickle_save(ivalue); std::ofstream fout(file_path, std::ios::out | std::ios::binary); fout.write(bytes.data(), bytes.size()); fout.close(); } c10::Dict<std::string, torch::Tensor> load_dict_from_file(const std::string& file_path) { c10::Dict<std::string, torch::Tensor> arg_dict; auto arg_dict_module = torch::jit::load(file_path); for (const auto& p : arg_dict_module.named_buffers(/*recurse=*/false)) { arg_dict.insert(p.name, p.value); } return arg_dict; } // Generates rand tensor with non-equal values. 
This ensures that duplicate // values won't be causing test failure for modules like MaxPooling. // size should be small, otherwise randperm fails / long overflows. torch::Tensor _rand_tensor_non_equal(torch::IntArrayRef size) { int64_t total = 1; for (int64_t elem : size) { total *= elem; } return torch::randperm(total).view(size).to(torch::kDouble); } """
import os import shutil import unittest import warnings from collections import namedtuple import torch import torch.testing._internal.common_nn as common_nn import torch.utils.cpp_extension from torch.testing._internal.common_cuda import TEST_CUDA TorchNNModuleTestParams = namedtuple( "TorchNNModuleTestParams", [ # NN module name (e.g. "BCELoss") "module_name", # Unique identifier for this module config (e.g. "BCELoss_weights_cuda") "module_variant_name", # An instance of an NN test class (e.g. `CriterionTest`) which stores # necessary information (e.g. input / target / extra_args) for running the Python test "test_instance", # Constructor arguments passed to the C++ module constructor, which must be # strictly equivalent to the Python module constructor arguments # (e.g. `torch::nn::BCELossOptions().weight(torch::rand(10))`, # which is strictly equivalent to passing `torch.rand(10)` to `torch.nn.BCELoss` # constructor in Python) "cpp_constructor_args", # All arguments used in NN module's forward pass. # Please see `compute_arg_dict` function for details on how we construct this dict. # (e.g. # ``` # arg_dict = { # 'input': [python_input_tensor], # 'target': [python_target_tensor], # 'extra_args': [], # 'other': [], # } # ``` # ) "arg_dict", # Whether we expect this NN module test to pass the Python/C++ parity test # (e.g. `True`) "has_parity", # Device (e.g. "cuda") "device", # Temporary folder to store C++ outputs (to be compared with Python outputs later) "cpp_tmp_folder", ], ) TorchNNFunctionalTestParams = namedtuple( "TorchNNFunctionalTestParams", [ # NN functional name (e.g. "binary_cross_entropy") "functional_name", # Unique identifier for this functional config (e.g. "BCELoss_no_reduce_cuda") "functional_variant_name", # An instance of an NN test class (e.g. `NewModuleTest`) which stores # necessary information (e.g. input / target / extra_args) for running the Python test "test_instance", # The C++ function call that is strictly equivalent to the Python function call # (e.g. "F::binary_cross_entropy( # i, t.to(i.options()),F::BinaryCrossEntropyFuncOptions().reduction(torch::kNone))", # which is strictly equivalent to `F.binary_cross_entropy(i, t.type_as(i), reduction='none')` in Python) "cpp_function_call", # All arguments used in NN functional's function call. # Please see `compute_arg_dict` function for details on how we construct this dict. # (e.g. # ``` # arg_dict = { # 'input': [python_input_tensor], # 'target': [python_target_tensor], # 'extra_args': [], # 'other': [], # } # ``` # ) "arg_dict", # Whether we expect this NN functional test to pass the Python/C++ parity test # (e.g. `True`) "has_parity", # Device (e.g. "cuda") "device", # Temporary folder to store C++ outputs (to be compared with Python outputs later) "cpp_tmp_folder", ], ) CppArg = namedtuple("CppArg", ["name", "value"]) TORCH_NN_COMMON_TEST_HARNESS = """ #include <torch/script.h> void write_ivalue_to_file(const torch::IValue& ivalue, const std::string& file_path) { auto bytes = torch::jit::pickle_save(ivalue); std::ofstream fout(file_path, std::ios::out | std::ios::binary); fout.write(bytes.data(), bytes.size()); fout.close(); } c10::Dict<std::string, torch::Tensor> load_dict_from_file(const std::string& file_path) { c10::Dict<std::string, torch::Tensor> arg_dict; auto arg_dict_module = torch::jit::load(file_path); for (const auto& p : arg_dict_module.named_buffers(/*recurse=*/false)) { arg_dict.insert(p.name, p.value); } return arg_dict; } // Generates rand tensor with non-equal values. 
This ensures that duplicate // values won't be causing test failure for modules like MaxPooling. // size should be small, otherwise randperm fails / long overflows. torch::Tensor _rand_tensor_non_equal(torch::IntArrayRef size) { int64_t total = 1; for (int64_t elem : size) { total *= elem; } return torch::randperm(total).view(size).to(torch::kDouble); } """
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/cpp_api_parity/utils.py
serialize_arg_dict_as_script_module
def serialize_arg_dict_as_script_module(arg_dict):
    arg_dict_flat = {arg_name: arg_value
                     for arg_name, arg_value
                     in arg_dict['input'] + arg_dict['target'] + arg_dict['extra_args'] + arg_dict['other']}
    arg_dict_module = torch.nn.Module()
    for arg_name, arg_value in arg_dict_flat.items():
        assert isinstance(arg_value, torch.Tensor)
        arg_dict_module.register_buffer(arg_name, arg_value)
    return torch.jit.script(arg_dict_module)

# NOTE: any argument symbol used in `cpp_constructor_args` / `cpp_options_args` / `cpp_function_call`
# must have a mapping in `cpp_var_map`.
#
# The mapping can take one of the following formats:
#
# 1. `argument_name` -> Python value
# 2. `argument_name` -> '_get_input()' (which means `argument_name` in C++ will be bound to `test_instance._get_input()`)
#
# For example:
# ```
# def bceloss_weights_no_reduce_test():
#     t = torch.randn(15, 10).gt(0).double()
#     weights = torch.rand(10)
#     return dict(
#         fullname='BCELoss_weights_no_reduce',
#         constructor=wrap_functional(
#             lambda i: F.binary_cross_entropy(i, t.type_as(i),
#                                              weight=weights.type_as(i), reduction='none')),
#         cpp_function_call='''F::binary_cross_entropy(
#             i, t.to(i.options()),
#             F::BinaryCrossEntropyFuncOptions()
#                 .weight(weights.to(i.options()))
#                 .reduction(torch::kNone))''',
#         input_fn=lambda: torch.rand(15, 10).clamp_(2.8e-2, 1 - 2.8e-2),
#         cpp_var_map={'i': '_get_input()', 't': t, 'weights': weights},
#         reference_fn=lambda i, p, m: -(t * i.log() + (1 - t) * (1 - i).log()) * weights,
#     )
# ```
def serialize_arg_dict_as_script_module(arg_dict):
    arg_dict_flat = dict(
        arg_dict["input"]
        + arg_dict["target"]
        + arg_dict["extra_args"]
        + arg_dict["other"]
    )
    arg_dict_module = torch.nn.Module()
    for arg_name, arg_value in arg_dict_flat.items():
        assert isinstance(arg_value, torch.Tensor)
        arg_dict_module.register_buffer(arg_name, arg_value)
    return torch.jit.script(arg_dict_module)

# NOTE: any argument symbol used in `cpp_constructor_args` / `cpp_options_args` / `cpp_function_call`
# must have a mapping in `cpp_var_map`.
#
# The mapping can take one of the following formats:
#
# 1. `argument_name` -> Python value
# 2. `argument_name` -> '_get_input()' (which means `argument_name` in C++ will be bound to `test_instance._get_input()`)
#
# For example:
# ```
# def bceloss_weights_no_reduce_test():
#     t = torch.randn(15, 10).gt(0).double()
#     weights = torch.rand(10)
#     return dict(
#         fullname='BCELoss_weights_no_reduce',
#         constructor=wrap_functional(
#             lambda i: F.binary_cross_entropy(i, t.type_as(i),
#                                              weight=weights.type_as(i), reduction='none')),
#         cpp_function_call='''F::binary_cross_entropy(
#             i, t.to(i.options()),
#             F::BinaryCrossEntropyFuncOptions()
#                 .weight(weights.to(i.options()))
#                 .reduction(torch::kNone))''',
#         input_fn=lambda: torch.rand(15, 10).clamp_(2.8e-2, 1 - 2.8e-2),
#         cpp_var_map={'i': '_get_input()', 't': t, 'weights': weights},
#         reference_fn=lambda i, p, m: -(t * i.log() + (1 - t) * (1 - i).log()) * weights,
#     )
# ```
from collections import namedtuple import unittest import os import warnings import shutil import torch import torch.utils.cpp_extension import torch.testing._internal.common_nn as common_nn from torch.testing._internal.common_cuda import TEST_CUDA TorchNNModuleTestParams = namedtuple( 'TorchNNModuleTestParams', [ # NN module name (e.g. "BCELoss") 'module_name', # Unique identifier for this module config (e.g. "BCELoss_weights_cuda") 'module_variant_name', # An instance of an NN test class (e.g. `CriterionTest`) which stores # necessary information (e.g. input / target / extra_args) for running the Python test 'test_instance', # Constructor arguments passed to the C++ module constructor, which must be # strictly equivalent to the Python module constructor arguments # (e.g. `torch::nn::BCELossOptions().weight(torch::rand(10))`, # which is strictly equivalent to passing `torch.rand(10)` to `torch.nn.BCELoss` # constructor in Python) 'cpp_constructor_args', # All arguments used in NN module's forward pass. # Please see `compute_arg_dict` function for details on how we construct this dict. # (e.g. # ``` # arg_dict = { # 'input': [python_input_tensor], # 'target': [python_target_tensor], # 'extra_args': [], # 'other': [], # } # ``` # ) 'arg_dict', # Whether we expect this NN module test to pass the Python/C++ parity test # (e.g. `True`) 'has_parity', # Device (e.g. "cuda") 'device', # Temporary folder to store C++ outputs (to be compared with Python outputs later) 'cpp_tmp_folder', ] ) TorchNNFunctionalTestParams = namedtuple( 'TorchNNFunctionalTestParams', [ # NN functional name (e.g. "binary_cross_entropy") 'functional_name', # Unique identifier for this functional config (e.g. "BCELoss_no_reduce_cuda") 'functional_variant_name', # An instance of an NN test class (e.g. `NewModuleTest`) which stores # necessary information (e.g. input / target / extra_args) for running the Python test 'test_instance', # The C++ function call that is strictly equivalent to the Python function call # (e.g. "F::binary_cross_entropy( # i, t.to(i.options()),F::BinaryCrossEntropyFuncOptions().reduction(torch::kNone))", # which is strictly equivalent to `F.binary_cross_entropy(i, t.type_as(i), reduction='none')` in Python) 'cpp_function_call', # All arguments used in NN functional's function call. # Please see `compute_arg_dict` function for details on how we construct this dict. # (e.g. # ``` # arg_dict = { # 'input': [python_input_tensor], # 'target': [python_target_tensor], # 'extra_args': [], # 'other': [], # } # ``` # ) 'arg_dict', # Whether we expect this NN functional test to pass the Python/C++ parity test # (e.g. `True`) 'has_parity', # Device (e.g. "cuda") 'device', # Temporary folder to store C++ outputs (to be compared with Python outputs later) 'cpp_tmp_folder', ] ) CppArg = namedtuple('CppArg', ['name', 'value']) TORCH_NN_COMMON_TEST_HARNESS = """ #include <torch/script.h> void write_ivalue_to_file(const torch::IValue& ivalue, const std::string& file_path) { auto bytes = torch::jit::pickle_save(ivalue); std::ofstream fout(file_path, std::ios::out | std::ios::binary); fout.write(bytes.data(), bytes.size()); fout.close(); } c10::Dict<std::string, torch::Tensor> load_dict_from_file(const std::string& file_path) { c10::Dict<std::string, torch::Tensor> arg_dict; auto arg_dict_module = torch::jit::load(file_path); for (const auto& p : arg_dict_module.named_buffers(/*recurse=*/false)) { arg_dict.insert(p.name, p.value); } return arg_dict; } // Generates rand tensor with non-equal values. 
This ensures that duplicate // values won't cause test failures for modules like MaxPooling. // size should be small, otherwise randperm fails / long overflows. torch::Tensor _rand_tensor_non_equal(torch::IntArrayRef size) { int64_t total = 1; for (int64_t elem : size) { total *= elem; } return torch::randperm(total).view(size).to(torch::kDouble); } """
import os import shutil import unittest import warnings from collections import namedtuple import torch import torch.testing._internal.common_nn as common_nn import torch.utils.cpp_extension from torch.testing._internal.common_cuda import TEST_CUDA TorchNNModuleTestParams = namedtuple( "TorchNNModuleTestParams", [ # NN module name (e.g. "BCELoss") "module_name", # Unique identifier for this module config (e.g. "BCELoss_weights_cuda") "module_variant_name", # An instance of an NN test class (e.g. `CriterionTest`) which stores # necessary information (e.g. input / target / extra_args) for running the Python test "test_instance", # Constructor arguments passed to the C++ module constructor, which must be # strictly equivalent to the Python module constructor arguments # (e.g. `torch::nn::BCELossOptions().weight(torch::rand(10))`, # which is strictly equivalent to passing `torch.rand(10)` to `torch.nn.BCELoss` # constructor in Python) "cpp_constructor_args", # All arguments used in NN module's forward pass. # Please see `compute_arg_dict` function for details on how we construct this dict. # (e.g. # ``` # arg_dict = { # 'input': [python_input_tensor], # 'target': [python_target_tensor], # 'extra_args': [], # 'other': [], # } # ``` # ) "arg_dict", # Whether we expect this NN module test to pass the Python/C++ parity test # (e.g. `True`) "has_parity", # Device (e.g. "cuda") "device", # Temporary folder to store C++ outputs (to be compared with Python outputs later) "cpp_tmp_folder", ], ) TorchNNFunctionalTestParams = namedtuple( "TorchNNFunctionalTestParams", [ # NN functional name (e.g. "binary_cross_entropy") "functional_name", # Unique identifier for this functional config (e.g. "BCELoss_no_reduce_cuda") "functional_variant_name", # An instance of an NN test class (e.g. `NewModuleTest`) which stores # necessary information (e.g. input / target / extra_args) for running the Python test "test_instance", # The C++ function call that is strictly equivalent to the Python function call # (e.g. "F::binary_cross_entropy( # i, t.to(i.options()),F::BinaryCrossEntropyFuncOptions().reduction(torch::kNone))", # which is strictly equivalent to `F.binary_cross_entropy(i, t.type_as(i), reduction='none')` in Python) "cpp_function_call", # All arguments used in NN functional's function call. # Please see `compute_arg_dict` function for details on how we construct this dict. # (e.g. # ``` # arg_dict = { # 'input': [python_input_tensor], # 'target': [python_target_tensor], # 'extra_args': [], # 'other': [], # } # ``` # ) "arg_dict", # Whether we expect this NN functional test to pass the Python/C++ parity test # (e.g. `True`) "has_parity", # Device (e.g. "cuda") "device", # Temporary folder to store C++ outputs (to be compared with Python outputs later) "cpp_tmp_folder", ], ) CppArg = namedtuple("CppArg", ["name", "value"]) TORCH_NN_COMMON_TEST_HARNESS = """ #include <torch/script.h> void write_ivalue_to_file(const torch::IValue& ivalue, const std::string& file_path) { auto bytes = torch::jit::pickle_save(ivalue); std::ofstream fout(file_path, std::ios::out | std::ios::binary); fout.write(bytes.data(), bytes.size()); fout.close(); } c10::Dict<std::string, torch::Tensor> load_dict_from_file(const std::string& file_path) { c10::Dict<std::string, torch::Tensor> arg_dict; auto arg_dict_module = torch::jit::load(file_path); for (const auto& p : arg_dict_module.named_buffers(/*recurse=*/false)) { arg_dict.insert(p.name, p.value); } return arg_dict; } // Generates rand tensor with non-equal values. 
This ensures that duplicate // values won't cause test failures for modules like MaxPooling. // size should be small, otherwise randperm fails / long overflows. torch::Tensor _rand_tensor_non_equal(torch::IntArrayRef size) { int64_t total = 1; for (int64_t elem : size) { total *= elem; } return torch::randperm(total).view(size).to(torch::kDouble); } """
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
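The record above relies on a small trick worth spelling out: serialize_arg_dict_as_script_module attaches each tensor to a bare nn.Module as a named buffer, scripts it, and the C++ harness later recovers the tensors by iterating named_buffers(). The following is a minimal, self-contained Python-only sketch of that round trip (the file name and sample tensors are made up for illustration), not part of the test suite itself.

import os
import tempfile

import torch

arg_dict_flat = {"i0": torch.rand(2, 3), "t0": torch.rand(2, 3)}

holder = torch.nn.Module()
for name, value in arg_dict_flat.items():
    holder.register_buffer(name, value)

scripted = torch.jit.script(holder)

with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "args.pt")
    scripted.save(path)
    # named_buffers() is also what the C++ load_dict_from_file helper iterates over.
    recovered = dict(torch.jit.load(path).named_buffers())

assert torch.equal(recovered["i0"], arg_dict_flat["i0"])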
torch
test/cpp_api_parity/utils.py
compute_arg_dict
def compute_arg_dict(test_params_dict, test_instance): arg_dict = { 'input': [], 'target': [], 'extra_args': [], 'other': [], } def put_args_into_arg_dict(arg_type, arg_type_prefix, args): for i, arg in enumerate(args): arg_dict[arg_type].append(CppArg(name=arg_type_prefix + str(i), value=arg)) put_args_into_arg_dict('input', 'i', convert_to_list(test_instance._get_input())) if is_criterion_test(test_instance): put_args_into_arg_dict('target', 't', convert_to_list(test_instance._get_target())) if test_instance.extra_args: put_args_into_arg_dict('extra_args', 'e', convert_to_list(test_instance.extra_args)) cpp_var_map = test_params_dict.get('cpp_var_map', {}) for arg_name, arg_value in cpp_var_map.items(): if isinstance(arg_value, str): if arg_value == '_get_input()': arg_dict['other'].append(CppArg(name=arg_name, value=test_instance._get_input())) else: raise RuntimeError("`{}` has unsupported string value: {}".format(arg_name, arg_value)) elif isinstance(arg_value, torch.Tensor): arg_dict['other'].append(CppArg(name=arg_name, value=arg_value)) else: raise RuntimeError("`{}` has unsupported value: {}".format(arg_name, arg_value)) return arg_dict
def compute_arg_dict(test_params_dict, test_instance): arg_dict = { "input": [], "target": [], "extra_args": [], "other": [], } def put_args_into_arg_dict(arg_type, arg_type_prefix, args): for i, arg in enumerate(args): arg_dict[arg_type].append(CppArg(name=arg_type_prefix + str(i), value=arg)) put_args_into_arg_dict("input", "i", convert_to_list(test_instance._get_input())) if is_criterion_test(test_instance): put_args_into_arg_dict( "target", "t", convert_to_list(test_instance._get_target()) ) if test_instance.extra_args: put_args_into_arg_dict( "extra_args", "e", convert_to_list(test_instance.extra_args) ) cpp_var_map = test_params_dict.get("cpp_var_map", {}) for arg_name, arg_value in cpp_var_map.items(): if isinstance(arg_value, str): if arg_value == "_get_input()": arg_dict["other"].append( CppArg(name=arg_name, value=test_instance._get_input()) ) else: raise RuntimeError( f"`{arg_name}` has unsupported string value: {arg_value}" ) elif isinstance(arg_value, torch.Tensor): arg_dict["other"].append(CppArg(name=arg_name, value=arg_value)) else: raise RuntimeError(f"`{arg_name}` has unsupported value: {arg_value}") return arg_dict
from collections import namedtuple import unittest import os import warnings import shutil import torch import torch.utils.cpp_extension import torch.testing._internal.common_nn as common_nn from torch.testing._internal.common_cuda import TEST_CUDA TorchNNModuleTestParams = namedtuple( 'TorchNNModuleTestParams', [ # NN module name (e.g. "BCELoss") 'module_name', # Unique identifier for this module config (e.g. "BCELoss_weights_cuda") 'module_variant_name', # An instance of an NN test class (e.g. `CriterionTest`) which stores # necessary information (e.g. input / target / extra_args) for running the Python test 'test_instance', # Constructor arguments passed to the C++ module constructor, which must be # strictly equivalent to the Python module constructor arguments # (e.g. `torch::nn::BCELossOptions().weight(torch::rand(10))`, # which is strictly equivalent to passing `torch.rand(10)` to `torch.nn.BCELoss` # constructor in Python) 'cpp_constructor_args', # All arguments used in NN module's forward pass. # Please see `compute_arg_dict` function for details on how we construct this dict. # (e.g. # ``` # arg_dict = { # 'input': [python_input_tensor], # 'target': [python_target_tensor], # 'extra_args': [], # 'other': [], # } # ``` # ) 'arg_dict', # Whether we expect this NN module test to pass the Python/C++ parity test # (e.g. `True`) 'has_parity', # Device (e.g. "cuda") 'device', # Temporary folder to store C++ outputs (to be compared with Python outputs later) 'cpp_tmp_folder', ] ) TorchNNFunctionalTestParams = namedtuple( 'TorchNNFunctionalTestParams', [ # NN functional name (e.g. "binary_cross_entropy") 'functional_name', # Unique identifier for this functional config (e.g. "BCELoss_no_reduce_cuda") 'functional_variant_name', # An instance of an NN test class (e.g. `NewModuleTest`) which stores # necessary information (e.g. input / target / extra_args) for running the Python test 'test_instance', # The C++ function call that is strictly equivalent to the Python function call # (e.g. "F::binary_cross_entropy( # i, t.to(i.options()),F::BinaryCrossEntropyFuncOptions().reduction(torch::kNone))", # which is strictly equivalent to `F.binary_cross_entropy(i, t.type_as(i), reduction='none')` in Python) 'cpp_function_call', # All arguments used in NN functional's function call. # Please see `compute_arg_dict` function for details on how we construct this dict. # (e.g. # ``` # arg_dict = { # 'input': [python_input_tensor], # 'target': [python_target_tensor], # 'extra_args': [], # 'other': [], # } # ``` # ) 'arg_dict', # Whether we expect this NN functional test to pass the Python/C++ parity test # (e.g. `True`) 'has_parity', # Device (e.g. "cuda") 'device', # Temporary folder to store C++ outputs (to be compared with Python outputs later) 'cpp_tmp_folder', ] ) CppArg = namedtuple('CppArg', ['name', 'value']) TORCH_NN_COMMON_TEST_HARNESS = """ #include <torch/script.h> void write_ivalue_to_file(const torch::IValue& ivalue, const std::string& file_path) { auto bytes = torch::jit::pickle_save(ivalue); std::ofstream fout(file_path, std::ios::out | std::ios::binary); fout.write(bytes.data(), bytes.size()); fout.close(); } c10::Dict<std::string, torch::Tensor> load_dict_from_file(const std::string& file_path) { c10::Dict<std::string, torch::Tensor> arg_dict; auto arg_dict_module = torch::jit::load(file_path); for (const auto& p : arg_dict_module.named_buffers(/*recurse=*/false)) { arg_dict.insert(p.name, p.value); } return arg_dict; } // Generates rand tensor with non-equal values. 
This ensures that duplicate // values won't cause test failures for modules like MaxPooling. // size should be small, otherwise randperm fails / long overflows. torch::Tensor _rand_tensor_non_equal(torch::IntArrayRef size) { int64_t total = 1; for (int64_t elem : size) { total *= elem; } return torch::randperm(total).view(size).to(torch::kDouble); } """
import os import shutil import unittest import warnings from collections import namedtuple import torch import torch.testing._internal.common_nn as common_nn import torch.utils.cpp_extension from torch.testing._internal.common_cuda import TEST_CUDA TorchNNModuleTestParams = namedtuple( "TorchNNModuleTestParams", [ # NN module name (e.g. "BCELoss") "module_name", # Unique identifier for this module config (e.g. "BCELoss_weights_cuda") "module_variant_name", # An instance of an NN test class (e.g. `CriterionTest`) which stores # necessary information (e.g. input / target / extra_args) for running the Python test "test_instance", # Constructor arguments passed to the C++ module constructor, which must be # strictly equivalent to the Python module constructor arguments # (e.g. `torch::nn::BCELossOptions().weight(torch::rand(10))`, # which is strictly equivalent to passing `torch.rand(10)` to `torch.nn.BCELoss` # constructor in Python) "cpp_constructor_args", # All arguments used in NN module's forward pass. # Please see `compute_arg_dict` function for details on how we construct this dict. # (e.g. # ``` # arg_dict = { # 'input': [python_input_tensor], # 'target': [python_target_tensor], # 'extra_args': [], # 'other': [], # } # ``` # ) "arg_dict", # Whether we expect this NN module test to pass the Python/C++ parity test # (e.g. `True`) "has_parity", # Device (e.g. "cuda") "device", # Temporary folder to store C++ outputs (to be compared with Python outputs later) "cpp_tmp_folder", ], ) TorchNNFunctionalTestParams = namedtuple( "TorchNNFunctionalTestParams", [ # NN functional name (e.g. "binary_cross_entropy") "functional_name", # Unique identifier for this functional config (e.g. "BCELoss_no_reduce_cuda") "functional_variant_name", # An instance of an NN test class (e.g. `NewModuleTest`) which stores # necessary information (e.g. input / target / extra_args) for running the Python test "test_instance", # The C++ function call that is strictly equivalent to the Python function call # (e.g. "F::binary_cross_entropy( # i, t.to(i.options()),F::BinaryCrossEntropyFuncOptions().reduction(torch::kNone))", # which is strictly equivalent to `F.binary_cross_entropy(i, t.type_as(i), reduction='none')` in Python) "cpp_function_call", # All arguments used in NN functional's function call. # Please see `compute_arg_dict` function for details on how we construct this dict. # (e.g. # ``` # arg_dict = { # 'input': [python_input_tensor], # 'target': [python_target_tensor], # 'extra_args': [], # 'other': [], # } # ``` # ) "arg_dict", # Whether we expect this NN functional test to pass the Python/C++ parity test # (e.g. `True`) "has_parity", # Device (e.g. "cuda") "device", # Temporary folder to store C++ outputs (to be compared with Python outputs later) "cpp_tmp_folder", ], ) CppArg = namedtuple("CppArg", ["name", "value"]) TORCH_NN_COMMON_TEST_HARNESS = """ #include <torch/script.h> void write_ivalue_to_file(const torch::IValue& ivalue, const std::string& file_path) { auto bytes = torch::jit::pickle_save(ivalue); std::ofstream fout(file_path, std::ios::out | std::ios::binary); fout.write(bytes.data(), bytes.size()); fout.close(); } c10::Dict<std::string, torch::Tensor> load_dict_from_file(const std::string& file_path) { c10::Dict<std::string, torch::Tensor> arg_dict; auto arg_dict_module = torch::jit::load(file_path); for (const auto& p : arg_dict_module.named_buffers(/*recurse=*/false)) { arg_dict.insert(p.name, p.value); } return arg_dict; } // Generates rand tensor with non-equal values. 
This ensures that duplicate // values won't cause test failures for modules like MaxPooling. // size should be small, otherwise randperm fails / long overflows. torch::Tensor _rand_tensor_non_equal(torch::IntArrayRef size) { int64_t total = 1; for (int64_t elem : size) { total *= elem; } return torch::randperm(total).view(size).to(torch::kDouble); } """
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
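As a quick illustration of the naming scheme compute_arg_dict produces (inputs become i0, i1, ..., targets t0, ..., extra args e0, ...), here is a standalone sketch. CppArg is redeclared locally only to keep the snippet self-contained; in the harness it comes from test/cpp_api_parity/utils.py, and the sample tensors are arbitrary.

from collections import namedtuple

import torch

CppArg = namedtuple("CppArg", ["name", "value"])

inputs = [torch.rand(4), torch.rand(4)]
targets = [torch.rand(4)]

arg_dict = {"input": [], "target": [], "extra_args": [], "other": []}
for prefix, key, values in (("i", "input", inputs), ("t", "target", targets)):
    for i, value in enumerate(values):
        arg_dict[key].append(CppArg(name=prefix + str(i), value=value))

assert [a.name for a in arg_dict["input"]] == ["i0", "i1"]
assert [a.name for a in arg_dict["target"]] == ["t0"]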
torch
test/cpp_api_parity/utils.py
put_args_into_arg_dict
def put_args_into_arg_dict(arg_type, arg_type_prefix, args): for i, arg in enumerate(args): arg_dict[arg_type].append(CppArg(name=arg_type_prefix + str(i), value=arg)) put_args_into_arg_dict('input', 'i', convert_to_list(test_instance._get_input())) if is_criterion_test(test_instance): put_args_into_arg_dict('target', 't', convert_to_list(test_instance._get_target())) if test_instance.extra_args: put_args_into_arg_dict('extra_args', 'e', convert_to_list(test_instance.extra_args)) cpp_var_map = test_params_dict.get('cpp_var_map', {}) for arg_name, arg_value in cpp_var_map.items(): if isinstance(arg_value, str): if arg_value == '_get_input()': arg_dict['other'].append(CppArg(name=arg_name, value=test_instance._get_input())) else: raise RuntimeError("`{}` has unsupported string value: {}".format(arg_name, arg_value)) elif isinstance(arg_value, torch.Tensor): arg_dict['other'].append(CppArg(name=arg_name, value=arg_value)) else: raise RuntimeError("`{}` has unsupported value: {}".format(arg_name, arg_value)) return arg_dict
def put_args_into_arg_dict(arg_type, arg_type_prefix, args): for i, arg in enumerate(args): arg_dict[arg_type].append(CppArg(name=arg_type_prefix + str(i), value=arg)) put_args_into_arg_dict("input", "i", convert_to_list(test_instance._get_input())) if is_criterion_test(test_instance): put_args_into_arg_dict( "target", "t", convert_to_list(test_instance._get_target()) ) if test_instance.extra_args: put_args_into_arg_dict( "extra_args", "e", convert_to_list(test_instance.extra_args) ) cpp_var_map = test_params_dict.get("cpp_var_map", {}) for arg_name, arg_value in cpp_var_map.items(): if isinstance(arg_value, str): if arg_value == "_get_input()": arg_dict["other"].append( CppArg(name=arg_name, value=test_instance._get_input()) ) else: raise RuntimeError( f"`{arg_name}` has unsupported string value: {arg_value}" ) elif isinstance(arg_value, torch.Tensor): arg_dict["other"].append(CppArg(name=arg_name, value=arg_value)) else: raise RuntimeError(f"`{arg_name}` has unsupported value: {arg_value}") return arg_dict
from collections import namedtuple import unittest import os import warnings import shutil import torch import torch.utils.cpp_extension import torch.testing._internal.common_nn as common_nn from torch.testing._internal.common_cuda import TEST_CUDA TorchNNModuleTestParams = namedtuple( 'TorchNNModuleTestParams', [ # NN module name (e.g. "BCELoss") 'module_name', # Unique identifier for this module config (e.g. "BCELoss_weights_cuda") 'module_variant_name', # An instance of an NN test class (e.g. `CriterionTest`) which stores # necessary information (e.g. input / target / extra_args) for running the Python test 'test_instance', # Constructor arguments passed to the C++ module constructor, which must be # strictly equivalent to the Python module constructor arguments # (e.g. `torch::nn::BCELossOptions().weight(torch::rand(10))`, # which is strictly equivalent to passing `torch.rand(10)` to `torch.nn.BCELoss` # constructor in Python) 'cpp_constructor_args', # All arguments used in NN module's forward pass. # Please see `compute_arg_dict` function for details on how we construct this dict. # (e.g. # ``` # arg_dict = { # 'input': [python_input_tensor], # 'target': [python_target_tensor], # 'extra_args': [], # 'other': [], # } # ``` # ) 'arg_dict', # Whether we expect this NN module test to pass the Python/C++ parity test # (e.g. `True`) 'has_parity', # Device (e.g. "cuda") 'device', # Temporary folder to store C++ outputs (to be compared with Python outputs later) 'cpp_tmp_folder', ] ) TorchNNFunctionalTestParams = namedtuple( 'TorchNNFunctionalTestParams', [ # NN functional name (e.g. "binary_cross_entropy") 'functional_name', # Unique identifier for this functional config (e.g. "BCELoss_no_reduce_cuda") 'functional_variant_name', # An instance of an NN test class (e.g. `NewModuleTest`) which stores # necessary information (e.g. input / target / extra_args) for running the Python test 'test_instance', # The C++ function call that is strictly equivalent to the Python function call # (e.g. "F::binary_cross_entropy( # i, t.to(i.options()),F::BinaryCrossEntropyFuncOptions().reduction(torch::kNone))", # which is strictly equivalent to `F.binary_cross_entropy(i, t.type_as(i), reduction='none')` in Python) 'cpp_function_call', # All arguments used in NN functional's function call. # Please see `compute_arg_dict` function for details on how we construct this dict. # (e.g. # ``` # arg_dict = { # 'input': [python_input_tensor], # 'target': [python_target_tensor], # 'extra_args': [], # 'other': [], # } # ``` # ) 'arg_dict', # Whether we expect this NN functional test to pass the Python/C++ parity test # (e.g. `True`) 'has_parity', # Device (e.g. "cuda") 'device', # Temporary folder to store C++ outputs (to be compared with Python outputs later) 'cpp_tmp_folder', ] ) CppArg = namedtuple('CppArg', ['name', 'value']) TORCH_NN_COMMON_TEST_HARNESS = """ #include <torch/script.h> void write_ivalue_to_file(const torch::IValue& ivalue, const std::string& file_path) { auto bytes = torch::jit::pickle_save(ivalue); std::ofstream fout(file_path, std::ios::out | std::ios::binary); fout.write(bytes.data(), bytes.size()); fout.close(); } c10::Dict<std::string, torch::Tensor> load_dict_from_file(const std::string& file_path) { c10::Dict<std::string, torch::Tensor> arg_dict; auto arg_dict_module = torch::jit::load(file_path); for (const auto& p : arg_dict_module.named_buffers(/*recurse=*/false)) { arg_dict.insert(p.name, p.value); } return arg_dict; } // Generates rand tensor with non-equal values. 
This ensures that duplicate // values won't cause test failures for modules like MaxPooling. // size should be small, otherwise randperm fails / long overflows. torch::Tensor _rand_tensor_non_equal(torch::IntArrayRef size) { int64_t total = 1; for (int64_t elem : size) { total *= elem; } return torch::randperm(total).view(size).to(torch::kDouble); } """
import os import shutil import unittest import warnings from collections import namedtuple import torch import torch.testing._internal.common_nn as common_nn import torch.utils.cpp_extension from torch.testing._internal.common_cuda import TEST_CUDA TorchNNModuleTestParams = namedtuple( "TorchNNModuleTestParams", [ # NN module name (e.g. "BCELoss") "module_name", # Unique identifier for this module config (e.g. "BCELoss_weights_cuda") "module_variant_name", # An instance of an NN test class (e.g. `CriterionTest`) which stores # necessary information (e.g. input / target / extra_args) for running the Python test "test_instance", # Constructor arguments passed to the C++ module constructor, which must be # strictly equivalent to the Python module constructor arguments # (e.g. `torch::nn::BCELossOptions().weight(torch::rand(10))`, # which is strictly equivalent to passing `torch.rand(10)` to `torch.nn.BCELoss` # constructor in Python) "cpp_constructor_args", # All arguments used in NN module's forward pass. # Please see `compute_arg_dict` function for details on how we construct this dict. # (e.g. # ``` # arg_dict = { # 'input': [python_input_tensor], # 'target': [python_target_tensor], # 'extra_args': [], # 'other': [], # } # ``` # ) "arg_dict", # Whether we expect this NN module test to pass the Python/C++ parity test # (e.g. `True`) "has_parity", # Device (e.g. "cuda") "device", # Temporary folder to store C++ outputs (to be compared with Python outputs later) "cpp_tmp_folder", ], ) TorchNNFunctionalTestParams = namedtuple( "TorchNNFunctionalTestParams", [ # NN functional name (e.g. "binary_cross_entropy") "functional_name", # Unique identifier for this functional config (e.g. "BCELoss_no_reduce_cuda") "functional_variant_name", # An instance of an NN test class (e.g. `NewModuleTest`) which stores # necessary information (e.g. input / target / extra_args) for running the Python test "test_instance", # The C++ function call that is strictly equivalent to the Python function call # (e.g. "F::binary_cross_entropy( # i, t.to(i.options()),F::BinaryCrossEntropyFuncOptions().reduction(torch::kNone))", # which is strictly equivalent to `F.binary_cross_entropy(i, t.type_as(i), reduction='none')` in Python) "cpp_function_call", # All arguments used in NN functional's function call. # Please see `compute_arg_dict` function for details on how we construct this dict. # (e.g. # ``` # arg_dict = { # 'input': [python_input_tensor], # 'target': [python_target_tensor], # 'extra_args': [], # 'other': [], # } # ``` # ) "arg_dict", # Whether we expect this NN functional test to pass the Python/C++ parity test # (e.g. `True`) "has_parity", # Device (e.g. "cuda") "device", # Temporary folder to store C++ outputs (to be compared with Python outputs later) "cpp_tmp_folder", ], ) CppArg = namedtuple("CppArg", ["name", "value"]) TORCH_NN_COMMON_TEST_HARNESS = """ #include <torch/script.h> void write_ivalue_to_file(const torch::IValue& ivalue, const std::string& file_path) { auto bytes = torch::jit::pickle_save(ivalue); std::ofstream fout(file_path, std::ios::out | std::ios::binary); fout.write(bytes.data(), bytes.size()); fout.close(); } c10::Dict<std::string, torch::Tensor> load_dict_from_file(const std::string& file_path) { c10::Dict<std::string, torch::Tensor> arg_dict; auto arg_dict_module = torch::jit::load(file_path); for (const auto& p : arg_dict_module.named_buffers(/*recurse=*/false)) { arg_dict.insert(p.name, p.value); } return arg_dict; } // Generates rand tensor with non-equal values. 
This ensures that duplicate // values won't cause test failures for modules like MaxPooling. // size should be small, otherwise randperm fails / long overflows. torch::Tensor _rand_tensor_non_equal(torch::IntArrayRef size) { int64_t total = 1; for (int64_t elem : size) { total *= elem; } return torch::randperm(total).view(size).to(torch::kDouble); } """
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/custom_operator/my_custom_ops.py
nonzero_abstract
def nonzero_abstract(x): n = x.dim() ctx = torch.library.get_ctx() nnz = ctx.create_unbacked_symint() shape = [nnz, n] return x.new_empty(shape, dtype=torch.long)
from model import get_custom_op_library_path import torch
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
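The nonzero_abstract record above shows the key idea behind abstract ("fake"/meta) implementations for data-dependent ops: the output row count cannot be known at trace time, so it is modelled with an unbacked SymInt. A minimal sketch of registering such an implementation for a made-up op is below. The library name "mylib", the op "my_nonzero", and the decorator name torch.library.impl_abstract are assumptions for illustration (newer PyTorch releases expose the same facility as register_fake); the actual test registers against the C++ custom_ops extension instead.

import torch

torch.library.define("mylib::my_nonzero", "(Tensor x) -> Tensor")


@torch.library.impl("mylib::my_nonzero", "CPU")
def my_nonzero_cpu(x):
    # Real kernel: just defer to the built-in op.
    return torch.nonzero(x)


@torch.library.impl_abstract("mylib::my_nonzero")
def my_nonzero_abstract(x):
    ctx = torch.library.get_ctx()
    nnz = ctx.create_unbacked_symint()  # number of nonzero rows is data dependent
    return x.new_empty([nnz, x.dim()], dtype=torch.long)


x = torch.tensor([0.0, 1.0, 0.0, 2.0])
print(torch.ops.mylib.my_nonzero(x))  # eager CPU path; the abstract impl is used under fake/meta tracing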
torch
test/custom_operator/my_custom_ops2.py
sin_abstract
def sin_abstract(x): return torch.empty_like(x)
from model import get_custom_op_library_path import torch
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/custom_operator/pointwise.py
tan_abstract
def tan_abstract(x): return torch.empty_like(x)
from model import get_custom_op_library_path import torch
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/custom_operator/test_custom_classes.py
get_custom_class_library_path
def get_custom_class_library_path(): library_filename = glob.glob("build/*custom_class*") assert (len(library_filename) == 1) library_filename = library_filename[0] path = os.path.abspath(library_filename) assert os.path.exists(path), path return path
import unittest import torch from torch import ops import torch.jit as jit import glob import os from torch.testing._internal.common_utils import TestCase, run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/custom_operator/test_custom_classes.py
test_equality
def test_equality(f, cmp_key): obj1 = f() obj2 = jit.script(f)() return (cmp_key(obj1), cmp_key(obj2)) class TestCustomOperators(TestCase): def setUp(self): ops.load_library(get_custom_class_library_path()) def test_no_return_class(self): def f(): val = torch.classes._TorchScriptTesting._Foo(5, 3) return val.info() self.assertEqual(*test_equality(f, lambda x: x)) def test_constructor_with_args(self): def f(): val = torch.classes._TorchScriptTesting._Foo(5, 3) return val self.assertEqual(*test_equality(f, lambda x: x.info())) def test_function_call_with_args(self): def f(): val = torch.classes._TorchScriptTesting._Foo(5, 3) val.increment(1) return val self.assertEqual(*test_equality(f, lambda x: x.info())) def test_function_method_wrong_type(self): def f(): val = torch.classes._TorchScriptTesting._Foo(5, 3) val.increment("asdf") return val with self.assertRaisesRegex(RuntimeError, "Expected"): jit.script(f)() @unittest.skip("We currently don't support passing custom classes to custom methods.") def test_input_class_type(self): def f(): val = torch.classes._TorchScriptTesting._Foo(1, 2) val2 = torch.classes._TorchScriptTesting._Foo(2, 3) val.combine(val2) return val self.assertEqual(*test_equality(f, lambda x: x.info())) def test_stack_string(self): def f(): val = torch.classes._TorchScriptTesting._StackString(["asdf", "bruh"]) return val.pop() self.assertEqual(*test_equality(f, lambda x: x)) def test_stack_push_pop(self): def f(): val = torch.classes._TorchScriptTesting._StackString(["asdf", "bruh"]) val2 = torch.classes._TorchScriptTesting._StackString(["111", "222"]) val.push(val2.pop()) return val.pop() + val2.pop() self.assertEqual(*test_equality(f, lambda x: x)) if __name__ == "__main__": run_tests()
import unittest import torch from torch import ops import torch.jit as jit import glob import os from torch.testing._internal.common_utils import TestCase, run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/custom_operator/test_custom_classes.py
setUp
def setUp(self): ops.load_library(get_custom_class_library_path())
import unittest import torch from torch import ops import torch.jit as jit import glob import os from torch.testing._internal.common_utils import TestCase, run_tests class TestCustomOperators(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/benchmark_utils/test_benchmark_utils.py
test_cpp_timer
def test_cpp_timer(self): timer = benchmark_utils.Timer( """ #ifndef TIMER_GLOBAL_CHECK static_assert(false); #endif torch::Tensor y = x + 1; """, setup="torch::Tensor x = torch::empty({1});", global_setup="#define TIMER_GLOBAL_CHECK", timer=timeit.default_timer, language=benchmark_utils.Language.CPP, ) t = timer.timeit(10) self.assertIsInstance(t.median, float) class _MockTimer: _seed = 0 _timer_noise_level = 0.05 _timer_cost = 100e-9 # 100 ns _function_noise_level = 0.05 _function_costs = ( ("pass", 8e-9), ("cheap_fn()", 4e-6), ("expensive_fn()", 20e-6), ("with torch.no_grad():\n y = x + 1", 10e-6), ) def __init__(self, stmt, setup, timer, globals): self._random_state = np.random.RandomState(seed=self._seed) self._mean_cost = {k: v for k, v in self._function_costs}[stmt] def sample(self, mean, noise_level): return max(self._random_state.normal(mean, mean * noise_level), 5e-9) def timeit(self, number): return sum([ # First timer invocation self.sample(self._timer_cost, self._timer_noise_level), # Stmt body self.sample(self._mean_cost * number, self._function_noise_level), # Second timer invocation self.sample(self._timer_cost, self._timer_noise_level), ])
def test_cpp_timer(self): timer = benchmark_utils.Timer( """ #ifndef TIMER_GLOBAL_CHECK static_assert(false); #endif torch::Tensor y = x + 1; """, setup="torch::Tensor x = torch::empty({1});", global_setup="#define TIMER_GLOBAL_CHECK", timer=timeit.default_timer, language=benchmark_utils.Language.CPP, ) t = timer.timeit(10) self.assertIsInstance(t.median, float) class _MockTimer: _seed = 0 _timer_noise_level = 0.05 _timer_cost = 100e-9 # 100 ns _function_noise_level = 0.05 _function_costs = ( ("pass", 8e-9), ("cheap_fn()", 4e-6), ("expensive_fn()", 20e-6), ("with torch.no_grad():\n y = x + 1", 10e-6), ) def __init__(self, stmt, setup, timer, globals): self._random_state = np.random.RandomState(seed=self._seed) self._mean_cost = dict(self._function_costs)[stmt] def sample(self, mean, noise_level): return max(self._random_state.normal(mean, mean * noise_level), 5e-9) def timeit(self, number): return sum( [ # First timer invocation self.sample(self._timer_cost, self._timer_noise_level), # Stmt body self.sample(self._mean_cost * number, self._function_noise_level), # Second timer invocation self.sample(self._timer_cost, self._timer_noise_level), ] )
import collections import json import os import re import textwrap import timeit from typing import Any, List, Tuple import unittest import torch import torch.utils.benchmark as benchmark_utils from torch.testing._internal.common_utils import TestCase, run_tests, IS_SANDCASTLE, IS_WINDOWS, slowTest import expecttest import numpy as np class TestBenchmarkUtils(TestCase): from torch.utils.benchmark.utils.valgrind_wrapper.timer_interface import wrapper_singleton
import collections import json import os import re import textwrap import timeit import unittest from typing import Any, List, Tuple import expecttest import numpy as np import torch import torch.utils.benchmark as benchmark_utils from torch.testing._internal.common_utils import ( IS_SANDCASTLE, IS_WINDOWS, run_tests, slowTest, TEST_WITH_ASAN, TestCase, ) class TestBenchmarkUtils(TestCase): from torch.utils.benchmark.utils.valgrind_wrapper.timer_interface import ( wrapper_singleton, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
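The _MockTimer in the record above encodes a simple cost model: one timeit(n) call is two noisy timer reads plus n noisy executions of the statement, so the per-call estimate timeit(n) / n converges to the true mean cost as n grows. The standalone sketch below reproduces that model with plain NumPy; the constants (100 ns timer cost, 5% noise, 4 us statement cost) mirror the mock's values.

import numpy as np

rng = np.random.RandomState(0)
timer_cost, stmt_cost, noise = 100e-9, 4e-6, 0.05


def sample(mean):
    # Gaussian noise around the mean, clamped to a small positive floor.
    return max(rng.normal(mean, mean * noise), 5e-9)


def timeit(number):
    # First timer read + statement body repeated `number` times + second timer read.
    return sample(timer_cost) + sample(stmt_cost * number) + sample(timer_cost)


for n in (1, 10, 1000):
    print(f"n={n:5d}  estimated per-call time: {timeit(n) / n:.3e} s")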
torch
test/benchmark_utils/test_benchmark_utils.py
__init__
def __init__(self, stmt, setup, timer, globals): self._random_state = np.random.RandomState(seed=self._seed) self._mean_cost = {k: v for k, v in self._function_costs}[stmt]
def __init__(self, stmt, setup, timer, globals): self._random_state = np.random.RandomState(seed=self._seed) self._mean_cost = dict(self._function_costs)[stmt]
import collections import json import os import re import textwrap import timeit from typing import Any, List, Tuple import unittest import torch import torch.utils.benchmark as benchmark_utils from torch.testing._internal.common_utils import TestCase, run_tests, IS_SANDCASTLE, IS_WINDOWS, slowTest import expecttest import numpy as np class _MockTimer: from torch.utils.benchmark.utils.valgrind_wrapper.timer_interface import wrapper_singleton
import collections import json import os import re import textwrap import timeit import unittest from typing import Any, List, Tuple import expecttest import numpy as np import torch import torch.utils.benchmark as benchmark_utils from torch.testing._internal.common_utils import ( IS_SANDCASTLE, IS_WINDOWS, run_tests, slowTest, TEST_WITH_ASAN, TestCase, ) class _MockTimer: from torch.utils.benchmark.utils.valgrind_wrapper.timer_interface import ( wrapper_singleton, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/cpp_extensions/open_registration_extension/pytorch_openreg/_device_daemon.py
run_op
def run_op(allocator, op_name, args, kwargs): op, _ = torch._C._jit_get_operation(op_name) args, kwargs = receive_after_sending(allocator, args, kwargs) return op(*args, **kwargs) class _Daemon: def __init__(self): super().__init__() self.is_initialized = False def _lazy_init(self): if self.is_initialized: return self.req_queue = mp_context.Queue() self.ans_queue = mp_context.Queue() self.runner = mp_context.Process( target=self.run_forever, args=(self.req_queue, self.ans_queue), daemon=True ) self.runner.start() self.is_initialized = True def exec(self, cmd, *args): self._lazy_init() log.info("Main process launched: %s(*%s)", cmd, safe_str(args)) validate_send_queue_args(cmd, args) self.req_queue.put((cmd,) + args) res = self.ans_queue.get() log.info("Main process result for %s received: %s", cmd, safe_str(res)) if res == "ERROR": raise RuntimeError(f"Error in daemon while executing {cmd}, see logs") else: return res @staticmethod def run_forever(req_queue, ans_queue): # Initialize our device global CURR_DEVICE_IDX empty_res = object() allocator = Allocator() # Serve all requests while True: cmd, *args = req_queue.get() log.info("Worker executing: %s", cmd) res = empty_res if cmd == "deviceCount": assert len(args) == 0 res = NUM_DEVICES elif cmd == "getDevice": res = CURR_DEVICE_IDX elif cmd == "uncheckedSetDevice": assert len(args) == 1 CURR_DEVICE_IDX = int(args[0]) res = None elif cmd == "exchangeDevice": assert len(args) == 1 res = CURR_DEVICE_IDX CURR_DEVICE_IDX = int(args[0]) elif cmd == "malloc": res = allocator.malloc(*args) elif cmd == "free": res = allocator.free(*args) elif cmd == "run_op": op_name, args, kwargs = args run_op(allocator, op_name, args, kwargs) res = None elif cmd == "send_data": assert len(args) == 1 res = OpenRegTensorData.from_meta(allocator, args[0]) elif cmd == "recv_data": assert len(args) == 2 host_tensor, dev_mem = args dev_tensor = OpenRegTensorData.from_meta(allocator, dev_mem) dev_tensor.copy_(host_tensor) res = None elif cmd == "get_op_output_shape": op_name, args, kwargs = args res = run_op(allocator, op_name, args, kwargs).size() else: log.warning("Bad command in worker") res = "ERROR" if res == empty_res: raise RuntimeError("Bad impl didn't return anything") log.info("Worker answering to: %s", cmd) ans_queue.put(res) daemon = _Daemon()
import logging import torch from ._meta_parser import ( OpenRegTensorData, receive_after_sending, safe_str, validate_send_queue_args, ) log = logging.getLogger(__name__) mp_context = torch.multiprocessing.get_context("spawn") NUM_DEVICES = 7 CURR_DEVICE_IDX = 0 CURR_STREAM = 0
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
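The _Daemon record above is built around a simple request/answer queue protocol: the parent process puts (command, *args) tuples on one queue, a worker process serves them in a loop, and every request gets exactly one reply, with the string "ERROR" signalling a failed command. The sketch below shows that pattern in isolation with a toy command set; it is not the openreg daemon itself, and the command names are made up.

import multiprocessing as mp


def run_forever(req_queue, ans_queue):
    # One reply per request; unknown commands answer with "ERROR".
    while True:
        cmd, *args = req_queue.get()
        if cmd == "add":
            ans_queue.put(sum(args))
        elif cmd == "echo":
            ans_queue.put(args[0])
        else:
            ans_queue.put("ERROR")


if __name__ == "__main__":
    ctx = mp.get_context("spawn")
    req, ans = ctx.Queue(), ctx.Queue()
    worker = ctx.Process(target=run_forever, args=(req, ans), daemon=True)
    worker.start()

    req.put(("add", 1, 2, 3))
    assert ans.get() == 6
    req.put(("bogus",))
    assert ans.get() == "ERROR"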
torch
test/cpp_extensions/open_registration_extension/pytorch_openreg/_device_daemon.py
__init__
def __init__(self): self.allocated = {}
import logging import torch from ._meta_parser import ( OpenRegTensorData, receive_after_sending, safe_str, validate_send_queue_args, ) log = logging.getLogger(__name__) mp_context = torch.multiprocessing.get_context("spawn") NUM_DEVICES = 7 CURR_DEVICE_IDX = 0 CURR_STREAM = 0 class Allocator:
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/cpp_extensions/open_registration_extension/pytorch_openreg/_device_daemon.py
_lazy_init
def _lazy_init(self): if self.is_initialized: return self.req_queue = mp_context.Queue() self.ans_queue = mp_context.Queue() self.runner = mp_context.Process( target=self.run_forever, args=(self.req_queue, self.ans_queue), daemon=True ) self.runner.start() self.is_initialized = True
import logging import torch from ._meta_parser import ( OpenRegTensorData, receive_after_sending, safe_str, validate_send_queue_args, ) log = logging.getLogger(__name__) mp_context = torch.multiprocessing.get_context("spawn") NUM_DEVICES = 7 CURR_DEVICE_IDX = 0 CURR_STREAM = 0 class _Daemon:
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/cpp_extensions/open_registration_extension/pytorch_openreg/_device_daemon.py
run_forever
def run_forever(req_queue, ans_queue): # Initialize our device global CURR_DEVICE_IDX empty_res = object() allocator = Allocator() # Serve all requests while True: cmd, *args = req_queue.get() log.info("Worker executing: %s", cmd) res = empty_res if cmd == "deviceCount": assert len(args) == 0 res = NUM_DEVICES elif cmd == "getDevice": res = CURR_DEVICE_IDX elif cmd == "uncheckedSetDevice": assert len(args) == 1 CURR_DEVICE_IDX = int(args[0]) res = None elif cmd == "exchangeDevice": assert len(args) == 1 res = CURR_DEVICE_IDX CURR_DEVICE_IDX = int(args[0]) elif cmd == "malloc": res = allocator.malloc(*args) elif cmd == "free": res = allocator.free(*args) elif cmd == "run_op": op_name, args, kwargs = args run_op(allocator, op_name, args, kwargs) res = None elif cmd == "send_data": assert len(args) == 1 res = OpenRegTensorData.from_meta(allocator, args[0]) elif cmd == "recv_data": assert len(args) == 2 host_tensor, dev_mem = args dev_tensor = OpenRegTensorData.from_meta(allocator, dev_mem) dev_tensor.copy_(host_tensor) res = None elif cmd == "get_op_output_shape": op_name, args, kwargs = args res = run_op(allocator, op_name, args, kwargs).size() else: log.warning("Bad command in worker") res = "ERROR" if res == empty_res: raise RuntimeError("Bad impl didn't return anything") log.info("Worker answering to: %s", cmd) ans_queue.put(res)
import logging import torch from ._meta_parser import ( OpenRegTensorData, receive_after_sending, safe_str, validate_send_queue_args, ) log = logging.getLogger(__name__) mp_context = torch.multiprocessing.get_context("spawn") NUM_DEVICES = 7 CURR_DEVICE_IDX = 0 CURR_STREAM = 0 class _Daemon:
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/cpp_extensions/open_registration_extension/pytorch_openreg/_meta_parser.py
__init__
def __init__(self, tensor, checked=True): if checked and not tensor.device.type == "openreg": raise RuntimeError( "Creating OpenRegTensorMeta is only for Tensors on openreg device" ) self.data_ptr = tensor.untyped_storage().data_ptr() self.size = tensor.size() self.stride = tensor.stride() self.storage_offset = tensor.storage_offset() self.dtype = tensor.dtype self.nelem_in_bytes = tensor.nelement() * tensor.element_size()
import pprint import torch from torch.utils._pytree import tree_map, tree_map_only class OpenRegTensorMeta:
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
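For orientation, the fields OpenRegTensorMeta captures are all ordinary tensor metadata. The snippet below collects the same fields from a CPU view tensor; the real class additionally insists the tensor lives on the openreg device, which only exists once the extension is loaded, so that check is skipped here.

import torch

t = torch.arange(12, dtype=torch.float32).reshape(3, 4)[:, 1:]  # a strided view

meta = {
    "data_ptr": t.untyped_storage().data_ptr(),
    "size": tuple(t.size()),
    "stride": t.stride(),
    "storage_offset": t.storage_offset(),
    "dtype": t.dtype,
    "nelem_in_bytes": t.nelement() * t.element_size(),
}
print(meta)  # e.g. size=(3, 3), stride=(4, 1), storage_offset=1, nelem_in_bytes=36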
torch
test/cpp_extensions/open_registration_extension/pytorch_openreg/_meta_parser.py
__repr__
def __repr__(self): return ( f"OpenRegTensorMeta({self.data_ptr=}, {self.size=}, {self.stride=}, " f"{self.storage_offset=}, {self.dtype=}, {self.nelem_in_bytes=})" )
import pprint import torch from torch.utils._pytree import tree_map, tree_map_only class OpenRegTensorMeta:
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/cpp_extensions/open_registration_extension/pytorch_openreg/_meta_parser.py
from_meta
def from_meta(allocator, tensor_meta): return OpenRegTensorData(allocator.tensor_from_meta(tensor_meta))
import pprint import torch from torch.utils._pytree import tree_map, tree_map_only class OpenRegTensorData(torch.Tensor):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/cpp_extensions/open_registration_extension/pytorch_openreg/_meta_parser.py
convert
def convert(obj): if isinstance(obj, torch.Tensor): return str(OpenRegTensorMeta(obj, checked=False)) else: return obj new_args = tree_map(convert, args) return pprint.pformat(new_args)
import pprint import torch from torch.utils._pytree import tree_map, tree_map_only VALID_QUEUE_TYPES_IN = {torch.Tensor, int, float} VALID_QUEUE_TYPES_OUT = {OpenRegTensorMeta, int, float, str}
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/cpp_extensions/open_registration_extension/pytorch_openreg/_meta_parser.py
check
def check(obj): if type(obj) not in VALID_QUEUE_TYPES_OUT: if ( cmd == "recv_data" and type(obj) is torch.Tensor and obj.device.type == "cpu" ): # Only HtoD copy command can send cpu Tensors over return raise RuntimeError( f"Trying to send invalid object through queue: {type(obj)}" ) tree_map(check, args)
import pprint import torch from torch.utils._pytree import tree_map, tree_map_only VALID_QUEUE_TYPES_IN = {torch.Tensor, int, float} VALID_QUEUE_TYPES_OUT = {OpenRegTensorMeta, int, float, str}
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/bottleneck_test/test_cuda.py
__init__
def __init__(self): super().__init__() self.linear = nn.Linear(20, 20)
def __init__(self) -> None: super().__init__() self.linear = nn.Linear(20, 20)
import torch import torch.nn as nn class Model(nn.Module):
import torch import torch.nn as nn class Model(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/custom_operator/test_custom_ops.py
test_dynamo_pystub_suggestion
def test_dynamo_pystub_suggestion(self): x = torch.randn(3) @torch.compile(backend="eager", fullgraph=True) def f(x): return torch.ops.custom.asin(x) with self.assertRaisesRegex( RuntimeError, r"unsupported operator: .* you may need to `import nonexistent`", ): f(x)
import os.path import sys import tempfile import unittest from model import get_custom_op_library_path, Model import torch import torch._library.utils as utils from torch import ops from torch.testing._internal.common_utils import IS_WINDOWS, run_tests, TestCase class TestCustomOperators(TestCase): from functorch import make_fx
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/custom_operator/test_custom_ops.py
f
def f(x): return torch.ops.custom.asin(x) with self.assertRaisesRegex( RuntimeError, r"unsupported operator: .* you may need to `import nonexistent`", ): f(x)
import os.path import sys import tempfile import unittest from model import get_custom_op_library_path, Model import torch import torch._library.utils as utils from torch import ops from torch.testing._internal.common_utils import IS_WINDOWS, run_tests, TestCase from functorch import make_fx
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/custom_operator/test_custom_ops.py
test_abstract_impl_pystub_meta
def test_abstract_impl_pystub_meta(self): x = torch.randn(3, device="meta") self.assertNotIn("my_custom_ops2", sys.modules.keys()) with self.assertRaisesRegex(NotImplementedError, r"'my_custom_ops2'"): y = torch.ops.custom.sin.default(x) torch.ops.import_module("my_custom_ops2") y = torch.ops.custom.sin.default(x)
import os.path import sys import tempfile import unittest from model import get_custom_op_library_path, Model import torch import torch._library.utils as utils from torch import ops from torch.testing._internal.common_utils import IS_WINDOWS, run_tests, TestCase class TestCustomOperators(TestCase): from functorch import make_fx
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
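The test above hinges on how meta tensors behave: they carry shape and dtype but no storage, so running an op on them exercises only the abstract (meta) implementation, which for these custom ops lives in a Python stub module imported on demand. A tiny illustration with a built-in op:

import torch

x = torch.randn(3, device="meta")
y = x + 1                   # meta kernel only: shape/dtype propagate, no real data is touched
print(y.shape, y.device)    # torch.Size([3]) meta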
torch
test/cpp_extensions/torch_test_cpp_extension/__init__.py
_autoload
def _autoload(): # Set the environment variable to true in this entrypoint os.environ["IS_CUSTOM_DEVICE_BACKEND_IMPORTED"] = "1"
import os
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/create_dummy_torchscript_model.py
__init__
def __init__(self): super().__init__() self.flatten = nn.Flatten() self.linear_relu_stack = nn.Sequential( nn.Linear(28 * 28, 512), nn.ReLU(), nn.Linear(512, 512), nn.ReLU(), nn.Linear(512, 10), )
def __init__(self) -> None: super().__init__() self.flatten = nn.Flatten() self.linear_relu_stack = nn.Sequential( nn.Linear(28 * 28, 512), nn.ReLU(), nn.Linear(512, 512), nn.ReLU(), nn.Linear(512, 10), )
import sys import torch from torch import nn class NeuralNetwork(nn.Module):
import sys import torch from torch import nn class NeuralNetwork(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/custom_backend/backend.py
get_custom_backend_library_path
def get_custom_backend_library_path(): """ Get the path to the library containing the custom backend. Return: The path to the custom backend object, customized by platform. """ if sys.platform.startswith("win32"): library_filename = "custom_backend.dll" elif sys.platform.startswith("darwin"): library_filename = "libcustom_backend.dylib" else: library_filename = "libcustom_backend.so" path = os.path.abspath("build/{}".format(library_filename)) assert os.path.exists(path), path return path
def get_custom_backend_library_path(): """ Get the path to the library containing the custom backend. Return: The path to the custom backend object, customized by platform. """ if sys.platform.startswith("win32"): library_filename = "custom_backend.dll" elif sys.platform.startswith("darwin"): library_filename = "libcustom_backend.dylib" else: library_filename = "libcustom_backend.so" path = os.path.abspath(f"build/{library_filename}") assert os.path.exists(path), path return path
import argparse import os.path import sys import torch
import argparse import os.path import sys import torch
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
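A short sketch of how a platform-dependent library path like the one above is typically consumed: torch.ops.load_library dlopens the shared object so the backends and operators it registers become visible to torch.ops / torch.classes. The "build/..." location is an assumption that only holds after the test's build step has run, so the sketch guards on existence.

import os.path
import sys

import torch

if sys.platform.startswith("win32"):
    library_filename = "custom_backend.dll"
elif sys.platform.startswith("darwin"):
    library_filename = "libcustom_backend.dylib"
else:
    library_filename = "libcustom_backend.so"

path = os.path.abspath(os.path.join("build", library_filename))
if os.path.exists(path):            # only present once the extension has been built
    torch.ops.load_library(path)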
torch
test/custom_operator/model.py
get_custom_op_library_path
def get_custom_op_library_path(): if sys.platform.startswith("win32"): library_filename = "custom_ops.dll" elif sys.platform.startswith("darwin"): library_filename = "libcustom_ops.dylib" else: library_filename = "libcustom_ops.so" path = os.path.abspath("build/{}".format(library_filename)) assert os.path.exists(path), path return path class Model(torch.jit.ScriptModule): def __init__(self): super().__init__() self.p = torch.nn.Parameter(torch.eye(5)) @torch.jit.script_method def forward(self, input): return torch.ops.custom.op_with_defaults(input)[0] + 1
def get_custom_op_library_path(): if sys.platform.startswith("win32"): library_filename = "custom_ops.dll" elif sys.platform.startswith("darwin"): library_filename = "libcustom_ops.dylib" else: library_filename = "libcustom_ops.so" path = os.path.abspath(f"build/{library_filename}") assert os.path.exists(path), path return path class Model(torch.jit.ScriptModule): def __init__(self) -> None: super().__init__() self.p = torch.nn.Parameter(torch.eye(5)) @torch.jit.script_method def forward(self, input): return torch.ops.custom.op_with_defaults(input)[0] + 1
import argparse import os.path import sys import torch
import argparse import os.path import sys import torch
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/_composable/fsdp/test_fully_shard_compile.py
test_nested_fully_shard_backend_inductor
def test_nested_fully_shard_backend_inductor(self): for fullgraph in [True, False]: with self._reinplace_all_gather_with_optional_checks( fullgraph ), self._maybe_run_decide_global_ordering_of_comms_with_checks(fullgraph): _, triton_codes = run_and_get_code( lambda: self._test_traceable_fsdp( *self._create_nested_fully_shard_factory_fns( fullgraph=fullgraph ), "inductor", fullgraph=fullgraph, ) ) if fullgraph: self.assertTrue( len(triton_codes) == 2, "Expected two separate lowerings to Triton code, one from FWD graph and one from Compiled Autograd BWD graph", ) fwd_code = triton_codes[0] file_check = FileCheck().check("def call(args):") for fwd_ag_block_info in [ dict(overlapped_compute_op_str=None, num_resize=0, num_set=2), dict( overlapped_compute_op_str="extern_kernels.mm(", num_resize=2, num_set=2, ), dict( overlapped_compute_op_str="extern_kernels.mm(", num_resize=2, num_set=2, ), dict( overlapped_compute_op_str="extern_kernels.mm(", num_resize=2, num_set=2, ), dict( overlapped_compute_op_str="extern_kernels.mm(", num_resize=2, num_set=2, ), dict( overlapped_compute_op_str="extern_kernels.mm(", num_resize=2, num_set=2, ), dict( overlapped_compute_op_str="extern_kernels.mm(", num_resize=2, num_set=2, ), dict( overlapped_compute_op_str="extern_kernels.mm(", num_resize=2, num_set=2, ), dict( overlapped_compute_op_str="extern_kernels.mm(", num_resize=2, num_set=2, last_all_gather=True, ), ]: file_check = self.inductor_code_check_fsdp_all_gather( file_check, **fwd_ag_block_info ) file_check.run(fwd_code) bwd_code = triton_codes[1] file_check = FileCheck().check("def call(args):") for bwd_ag_block_info in [ dict(overlapped_compute_op_str=None, num_resize=0, num_set=2), dict( overlapped_compute_op_str="extern_kernels.mm(", num_resize=0, num_set=2, ), dict( overlapped_compute_op_str="extern_kernels.mm(", num_resize=0, num_set=2, last_all_gather=True, ), ]: file_check = self.inductor_code_check_fsdp_all_gather( file_check, **bwd_ag_block_info ) for bwd_rs_block_info in [ dict(overlapped_compute_op_str="extern_kernels.mm("), dict( overlapped_compute_op_str=None ), # TODO: improve compute/comm overlap, so that `overlapped_compute_op_str` is not None dict(overlapped_compute_op_str=None), ]: file_check = self.inductor_code_check_fsdp_reduce_scatter( file_check, **bwd_rs_block_info ) file_check.run(bwd_code) else: # TODO: when fullgraph=False and there is graph break in FWD graph, # there are several recompiles, need to figure out why. self.assertTrue( len(triton_codes) > 2, "Expected at least 3 separate lowerings to Triton code, which means at least 1 graph break in FWD graph", )
import contextlib import copy import functools import unittest from unittest import mock import torch import torch._dynamo.testing import torch.distributed._composable.fsdp._fsdp_param import torch.nn.functional as F from torch import nn from torch._dynamo import compiled_autograd from torch._inductor import comms from torch._inductor.utils import is_fallback_op, run_and_get_code from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_common import TrainingState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import init_device_mesh from torch.testing import FileCheck from torch.testing._internal.common_distributed import at_least_x_gpu, skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, MLP from torch.testing._internal.common_utils import run_tests, skipIfRocm from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, ) from torch.utils._triton import has_triton class TestFullyShardCompile(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
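The test above leans heavily on FileCheck to scan the generated Triton code: checks must appear in order in the string passed to run(). A tiny standalone example of that API, with a made-up code snippet standing in for real Inductor output:

from torch.testing import FileCheck

generated = """
def call(args):
    buf0 = empty_strided(...)
    extern_kernels.mm(a, b, out=buf0)
"""

# Passes because the two patterns occur in this order; a missing or out-of-order
# pattern would raise with a diagnostic pointing at the unmatched check.
FileCheck().check("def call(args):").check("extern_kernels.mm(").run(generated)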
torch
test/distributed/_composable/fsdp/test_fully_shard_compile.py
_create_transformer_factory_fns
def _create_transformer_factory_fns(self): seq_len = 16 vocab_size = 8 def model_init_fn(): torch.manual_seed(self.rank) fsdp_config = {} mesh = init_device_mesh("cuda", (self.world_size,)) model_args = ModelArgs( vocab_size=vocab_size, n_layers=3, ) model = Transformer(model_args) for layer_id, mod in enumerate(model.layers): fully_shard(mod, mesh=mesh, reshard_after_forward=True, **fsdp_config) model = fully_shard( model, mesh=mesh, reshard_after_forward=True, **fsdp_config ) optim = torch.optim.SGD(model.parameters(), lr=1e-4) return model, optim def input_creation_fn(): torch.manual_seed(self.rank) inp = torch.randint( 0, vocab_size, (2, seq_len), device="cuda", requires_grad=False ) return inp return model_init_fn, input_creation_fn
import contextlib import copy import functools import unittest from unittest import mock import torch import torch._dynamo.testing import torch.distributed._composable.fsdp._fsdp_param import torch.nn.functional as F from torch import nn from torch._dynamo import compiled_autograd from torch._inductor import comms from torch._inductor.utils import is_fallback_op, run_and_get_code from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_common import TrainingState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import init_device_mesh from torch.testing import FileCheck from torch.testing._internal.common_distributed import at_least_x_gpu, skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, MLP from torch.testing._internal.common_utils import run_tests, skipIfRocm from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, ) from torch.utils._triton import has_triton class TestFullyShardCompile(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_compile.py
model_init_fn
def model_init_fn(): torch.manual_seed(self.rank) fsdp_config = {} model = nn.Sequential( nn.Linear(hidden_dim, hidden_dim, device="cuda"), nn.ReLU(), nn.Linear(hidden_dim, hidden_dim, device="cuda"), nn.ReLU(), nn.Linear(hidden_dim, hidden_dim, device="cuda"), ) fully_shard(model, reshard_after_forward=True, **fsdp_config) optim = torch.optim.SGD(model.parameters(), lr=1e-4) return model, optim
import contextlib import copy import functools import unittest from unittest import mock import torch import torch._dynamo.testing import torch.distributed._composable.fsdp._fsdp_param import torch.nn.functional as F from torch import nn from torch._dynamo import compiled_autograd from torch._inductor import comms from torch._inductor.utils import is_fallback_op, run_and_get_code from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_common import TrainingState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import init_device_mesh from torch.testing import FileCheck from torch.testing._internal.common_distributed import at_least_x_gpu, skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, MLP from torch.testing._internal.common_utils import run_tests, skipIfRocm from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, ) from torch.utils._triton import has_triton
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_compile.py
input_creation_fn
def input_creation_fn(): torch.manual_seed(self.rank) inp = torch.randn((2, hidden_dim), device="cuda", requires_grad=False) return inp return model_init_fn, input_creation_fn
import contextlib import copy import functools import unittest from unittest import mock import torch import torch._dynamo.testing import torch.distributed._composable.fsdp._fsdp_param import torch.nn.functional as F from torch import nn from torch._dynamo import compiled_autograd from torch._inductor import comms from torch._inductor.utils import is_fallback_op, run_and_get_code from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_common import TrainingState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import init_device_mesh from torch.testing import FileCheck from torch.testing._internal.common_distributed import at_least_x_gpu, skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, MLP from torch.testing._internal.common_utils import run_tests, skipIfRocm from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, ) from torch.utils._triton import has_triton
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_compile.py
_sdpa_with_graph_break
def _sdpa_with_graph_break(orig_fn, fullgraph, *args, **kwargs):
    if not fullgraph:
        torch._dynamo.graph_break()
    return orig_fn(*args, **kwargs)

return mock.patch.object(
    F,
    "scaled_dot_product_attention",
    functools.partial(
        _sdpa_with_graph_break,
        F.scaled_dot_product_attention,
        fullgraph,
    ),
)
import contextlib import copy import functools import unittest from unittest import mock import torch import torch._dynamo.testing import torch.distributed._composable.fsdp._fsdp_param import torch.nn.functional as F from torch import nn from torch._dynamo import compiled_autograd from torch._inductor import comms from torch._inductor.utils import is_fallback_op, run_and_get_code from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_common import TrainingState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import init_device_mesh from torch.testing import FileCheck from torch.testing._internal.common_distributed import at_least_x_gpu, skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, MLP from torch.testing._internal.common_utils import run_tests, skipIfRocm from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, ) from torch.utils._triton import has_triton
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_compile.py
test_transformer_backend_inductor
def test_transformer_backend_inductor(self):
    for fullgraph in [True, False]:
        with self._maybe_add_graph_break_to_sdpa(
            fullgraph
        ), self._reinplace_all_gather_with_optional_checks(
            fullgraph
        ), self._maybe_run_decide_global_ordering_of_comms_with_checks(
            fullgraph
        ):
            _, triton_codes = run_and_get_code(
                lambda: self._test_traceable_fsdp(
                    *self._create_transformer_factory_fns(),
                    "inductor",
                    fullgraph=fullgraph,
                )
            )
            if fullgraph:
                self.assertTrue(
                    len(triton_codes) == 2,
                    "Expected two separate lowerings to Triton code, one from FWD graph and one from Compiled Autograd BWD graph",
                )
                fwd_code = triton_codes[0]
                file_check = FileCheck().check("def call(args):")
                for fwd_ag_block_info in [
                    dict(overlapped_compute_op_str="triton_", num_resize=0, num_set=4),
                    dict(
                        overlapped_compute_op_str="aten.native_dropout.",
                        num_resize=0,
                        num_set=12,
                    ),
                    dict(
                        overlapped_compute_op_str="aten._scaled_dot_product_efficient_attention.",
                        num_resize=12,
                        num_set=12,
                    ),
                    dict(
                        overlapped_compute_op_str="aten._scaled_dot_product_efficient_attention.",
                        num_resize=12,
                        num_set=12,
                        last_all_gather=True,
                    ),
                ]:
                    file_check = self.inductor_code_check_fsdp_all_gather(
                        file_check, **fwd_ag_block_info
                    )
                file_check.run(fwd_code)
                bwd_code = triton_codes[1]
                file_check = FileCheck().check("def call(args):")
                for bwd_ag_block_info in [
                    dict(
                        overlapped_compute_op_str="extern_kernels.mm(",
                        num_resize=0,
                        num_set=12,
                    ),
                    dict(
                        overlapped_compute_op_str="aten._scaled_dot_product_efficient_attention_backward.",
                        num_resize=0,
                        num_set=12,
                    ),
                    dict(
                        overlapped_compute_op_str="aten._scaled_dot_product_efficient_attention_backward.",
                        num_resize=0,
                        num_set=12,
                        last_all_gather=True,
                    ),
                ]:
                    file_check = self.inductor_code_check_fsdp_all_gather(
                        file_check, **bwd_ag_block_info
                    )
                for bwd_rs_block_info in [
                    dict(overlapped_compute_op_str="extern_kernels.mm("),
                    dict(
                        overlapped_compute_op_str=None
                    ),  # TODO: improve compute/comm overlap, so that `overlapped_compute_op_str` is not None
                    dict(overlapped_compute_op_str=None),
                    dict(overlapped_compute_op_str=None),
                ]:
                    file_check = self.inductor_code_check_fsdp_reduce_scatter(
                        file_check, **bwd_rs_block_info
                    )
                file_check.run(bwd_code)
            else:
                # TODO: when fullgraph=False and there is graph break in FWD graph,
                # there are several recompiles, need to figure out why.
                self.assertTrue(
                    len(triton_codes) > 2,
                    "Expected at least 3 separate lowerings to Triton code, which means at least 1 graph break in FWD graph",
                )
import contextlib import copy import functools import unittest from unittest import mock import torch import torch._dynamo.testing import torch.distributed._composable.fsdp._fsdp_param import torch.nn.functional as F from torch import nn from torch._dynamo import compiled_autograd from torch._inductor import comms from torch._inductor.utils import is_fallback_op, run_and_get_code from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_common import TrainingState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import init_device_mesh from torch.testing import FileCheck from torch.testing._internal.common_distributed import at_least_x_gpu, skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, MLP from torch.testing._internal.common_utils import run_tests, skipIfRocm from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, ) from torch.utils._triton import has_triton class TestFullyShardCompile(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_extensions.py
_patch_two_tensor_fsdp_all_gather
def _patch_two_tensor_fsdp_all_gather(self):
    lock = threading.Lock()
    TwoTensor.fsdp_pre_all_gather = two_tensor_fsdp_pre_all_gather
    TwoTensor.fsdp_post_all_gather = two_tensor_fsdp_post_all_gather
    dist.barrier()
    try:
        yield
    finally:
        dist.barrier()
        with lock:
            # only one thread needs to delete
            if hasattr(TwoTensor, "fsdp_pre_all_gather"):
                delattr(TwoTensor, "fsdp_pre_all_gather")
            if hasattr(TwoTensor, "fsdp_post_all_gather"):
                delattr(TwoTensor, "fsdp_post_all_gather")
import contextlib import copy import functools import threading import unittest from typing import Any, List, Optional, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable.fsdp import fully_shard, MixedPrecisionPolicy from torch.distributed.device_mesh import DeviceMesh from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, FSDPTestMultiThread, MLP, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.two_tensor import TwoTensor class TestFullyShardAllGatherExtensionsCommon:
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_extensions.py
test_all_gather_extensions_train_parity
def test_all_gather_extensions_train_parity(self):
    with self._patch_two_tensor_fsdp_all_gather():
        self.run_subtests(
            {"reshard_after_forward": [True, False]},
            self._test_all_gather_extensions_train_parity,
        )
import contextlib import copy import functools import threading import unittest from typing import Any, List, Optional, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable.fsdp import fully_shard, MixedPrecisionPolicy from torch.distributed.device_mesh import DeviceMesh from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, FSDPTestMultiThread, MLP, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.two_tensor import TwoTensor
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_init.py
test_managed_modules_list_of_mlps
def test_managed_modules_list_of_mlps(self):
    model = nn.Sequential(*[MLP(8) for _ in range(5)])
    # Assume calling `fully_shard` on `[model[0], model[1], model[2]]`
    managed_modules = _get_managed_modules((model[0], model[1], model[2]))
    expected_managed_modules = (
        list(model[0].modules())
        + list(model[1].modules())
        + list(model[2].modules())
    )
    self._check_managed_modules(managed_modules, expected_managed_modules)
    # Assume calling `fully_shard` on `[model[1], model[3]]`
    managed_modules = _get_managed_modules((model[1], model[3]))
    expected_managed_modules = list(model[1].modules()) + list(model[3].modules())
import copy import itertools import unittest from typing import List import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable import replicate from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_init import ( _get_managed_modules, _get_managed_states, ) from torch.distributed._composable.fsdp._fsdp_param import ParamModuleInfo from torch.distributed._composable.fsdp._fsdp_param_group import _get_param_module_infos from torch.distributed._tensor import ( DeviceMesh, distribute_tensor, DTensor, Replicate, Shard, ) from torch.distributed.device_mesh import init_device_mesh from torch.distributed.fsdp._init_utils import ( _init_inter_node_process_group, _init_intra_node_process_group, ) from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, RowwiseParallel, ) from torch.distributed.tensor.placement_types import _StridedShard from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_fsdp import FSDPTestMultiThread, MLP from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) class TestFullyShardManagedModulesAndStates(FSDPTestMultiThread):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_init.py
test_managed_states_list_of_mlps
def test_managed_states_list_of_mlps(self):
    model = nn.Sequential(*[MLP(8, with_buffer=True) for _ in range(5)])
    # Assume calling `fully_shard` on `[model[0], model[1], model[2]]`
    managed_modules = _get_managed_modules((model[0], model[1], model[2]))
    params, buffers = _get_managed_states(managed_modules)
    expected_params = (
        list(model[0].parameters())
        + list(model[1].parameters())
        + list(model[2].parameters())
    )
    expected_buffers = (
        list(model[0].buffers())
        + list(model[1].buffers())
        + list(model[2].buffers())
    )
    self._check_managed_states(params, buffers, expected_params, expected_buffers)
import copy import itertools import unittest from typing import List import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable import replicate from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_init import ( _get_managed_modules, _get_managed_states, ) from torch.distributed._composable.fsdp._fsdp_param import ParamModuleInfo from torch.distributed._composable.fsdp._fsdp_param_group import _get_param_module_infos from torch.distributed._tensor import ( DeviceMesh, distribute_tensor, DTensor, Replicate, Shard, ) from torch.distributed.device_mesh import init_device_mesh from torch.distributed.fsdp._init_utils import ( _init_inter_node_process_group, _init_intra_node_process_group, ) from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, RowwiseParallel, ) from torch.distributed.tensor.placement_types import _StridedShard from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_fsdp import FSDPTestMultiThread, MLP from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) class TestFullyShardManagedModulesAndStates(FSDPTestMultiThread):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_init.py
test_get_param_module_infos_list_of_mlps
def test_get_param_module_infos_list_of_mlps(self):
    model = nn.Sequential(*[MLP(8) for _ in range(2)])
    managed_modules = _get_managed_modules((model[0], model[1]))
    params, _ = _get_managed_states(managed_modules)
    param_module_infos = _get_param_module_infos(params, model)
    self.assertEqual(len(param_module_infos), len(params))
    expected_param_module_infos = [
        ParamModuleInfo(model[0].in_proj, "weight", [], []),
        ParamModuleInfo(model[0].in_proj, "bias", [], []),
        ParamModuleInfo(model[0].out_proj, "weight", [], []),
        ParamModuleInfo(model[0].out_proj, "bias", [], []),
        ParamModuleInfo(model[1].in_proj, "weight", [], []),
        ParamModuleInfo(model[1].in_proj, "bias", [], []),
        ParamModuleInfo(model[1].out_proj, "weight", [], []),
        ParamModuleInfo(model[1].out_proj, "bias", [], []),
    ]
    self.assertEqual(len(param_module_infos), len(expected_param_module_infos))
    self.assertEqual(param_module_infos, expected_param_module_infos)
import copy import itertools import unittest from typing import List import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable import replicate from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_init import ( _get_managed_modules, _get_managed_states, ) from torch.distributed._composable.fsdp._fsdp_param import ParamModuleInfo from torch.distributed._composable.fsdp._fsdp_param_group import _get_param_module_infos from torch.distributed._tensor import ( DeviceMesh, distribute_tensor, DTensor, Replicate, Shard, ) from torch.distributed.device_mesh import init_device_mesh from torch.distributed.fsdp._init_utils import ( _init_inter_node_process_group, _init_intra_node_process_group, ) from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, RowwiseParallel, ) from torch.distributed.tensor.placement_types import _StridedShard from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_fsdp import FSDPTestMultiThread, MLP from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) class TestFullyShardParamModuleInfos(FSDPTestMultiThread):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_init.py
test_shard_tensor_parameters
def test_shard_tensor_parameters(self):
    # Use odd dim sizes to test uneven shards
    model = nn.Sequential(*[MLP(3, dim_multiplier=3) for _ in range(3)])
    orig_params = [param.detach().clone() for param in model.parameters()]
    fully_shard(model)
    sharded_params = list(model.parameters())
    self._check_1d_sharded_parameters(orig_params, sharded_params)
    model = nn.Sequential(*[MLP(3, dim_multiplier=3) for _ in range(3)])
    model[0].in_proj = model[1].in_proj
    orig_params = [param.detach().clone() for param in model.parameters()]
    fully_shard(model)
    sharded_params = list(model.parameters())
    self._check_1d_sharded_parameters(orig_params, sharded_params)
import copy import itertools import unittest from typing import List import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable import replicate from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_init import ( _get_managed_modules, _get_managed_states, ) from torch.distributed._composable.fsdp._fsdp_param import ParamModuleInfo from torch.distributed._composable.fsdp._fsdp_param_group import _get_param_module_infos from torch.distributed._tensor import ( DeviceMesh, distribute_tensor, DTensor, Replicate, Shard, ) from torch.distributed.device_mesh import init_device_mesh from torch.distributed.fsdp._init_utils import ( _init_inter_node_process_group, _init_intra_node_process_group, ) from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, RowwiseParallel, ) from torch.distributed.tensor.placement_types import _StridedShard from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_fsdp import FSDPTestMultiThread, MLP from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) class TestFullyShardShardedParameterTensor(FSDPTestMultiThread):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/custom_operator/test_infer_schema_annotation.py
test_tensor
def test_tensor(self):
    def foo_op(x: torch.Tensor) -> torch.Tensor:
        return x.clone()

    result = torch.library.infer_schema(foo_op, mutates_args=mutates_args)
    self.assertEqual(result, "(Tensor x) -> Tensor")

    def foo_op_2(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        return x.clone() + y

    result = torch.library.infer_schema(foo_op_2, mutates_args=mutates_args)
    self.assertEqual(result, "(Tensor x, Tensor y) -> Tensor")
from __future__ import annotations import typing from typing import List, Optional, Sequence, Union # noqa: F401 import torch from torch import Tensor, types from torch.testing._internal.common_utils import run_tests, TestCase mutates_args = {} class TestInferSchemaWithAnnotation(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/custom_operator/test_infer_schema_annotation.py
test_native_types
def test_native_types(self):
    def foo_op(x: int) -> int:
        return x

    result = torch.library.infer_schema(foo_op, mutates_args=mutates_args)
    self.assertEqual(result, "(SymInt x) -> SymInt")

    def foo_op_2(x: bool) -> bool:
        return x

    result = torch.library.infer_schema(foo_op_2, mutates_args=mutates_args)
    self.assertEqual(result, "(bool x) -> bool")

    def foo_op_3(x: str) -> int:
        return 1

    result = torch.library.infer_schema(foo_op_3, mutates_args=mutates_args)
    self.assertEqual(result, "(str x) -> SymInt")

    def foo_op_4(x: float) -> float:
        return x

    result = torch.library.infer_schema(foo_op_4, mutates_args=mutates_args)
    self.assertEqual(result, "(float x) -> float")
from __future__ import annotations import typing from typing import List, Optional, Sequence, Union # noqa: F401 import torch from torch import Tensor, types from torch.testing._internal.common_utils import run_tests, TestCase mutates_args = {} class TestInferSchemaWithAnnotation(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/custom_operator/test_infer_schema_annotation.py
test_torch_types
def test_torch_types(self):
    def foo_op_1(x: torch.types.Number) -> torch.types.Number:
        return x

    result = torch.library.infer_schema(foo_op_1, mutates_args=mutates_args)
    self.assertEqual(result, "(Scalar x) -> Scalar")

    def foo_op_2(x: torch.dtype) -> int:
        return 1

    result = torch.library.infer_schema(foo_op_2, mutates_args=mutates_args)
    self.assertEqual(result, "(ScalarType x) -> SymInt")

    def foo_op_3(x: torch.device) -> int:
        return 1

    result = torch.library.infer_schema(foo_op_3, mutates_args=mutates_args)
    self.assertEqual(result, "(Device x) -> SymInt")
from __future__ import annotations import typing from typing import List, Optional, Sequence, Union # noqa: F401 import torch from torch import Tensor, types from torch.testing._internal.common_utils import run_tests, TestCase mutates_args = {} class TestInferSchemaWithAnnotation(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/custom_operator/test_infer_schema_annotation.py
test_type_variants
def test_type_variants(self):
    def foo_op_1(x: typing.Optional[int]) -> int:
        return 1

    result = torch.library.infer_schema(foo_op_1, mutates_args=mutates_args)
    self.assertEqual(result, "(SymInt? x) -> SymInt")

    def foo_op_2(x: typing.Sequence[int]) -> int:
        return 1

    result = torch.library.infer_schema(foo_op_2, mutates_args=mutates_args)
    self.assertEqual(result, "(SymInt[] x) -> SymInt")

    def foo_op_3(x: typing.List[int]) -> int:
        return 1

    result = torch.library.infer_schema(foo_op_3, mutates_args=mutates_args)
    self.assertEqual(result, "(SymInt[] x) -> SymInt")

    def foo_op_4(x: typing.Optional[typing.Sequence[int]]) -> int:
        return 1

    result = torch.library.infer_schema(foo_op_4, mutates_args=mutates_args)
    self.assertEqual(result, "(SymInt[]? x) -> SymInt")

    def foo_op_5(x: typing.Optional[typing.List[int]]) -> int:
        return 1

    result = torch.library.infer_schema(foo_op_5, mutates_args=mutates_args)
    self.assertEqual(result, "(SymInt[]? x) -> SymInt")

    def foo_op_6(x: typing.Union[int, float, bool]) -> types.Number:
        return x

    result = torch.library.infer_schema(foo_op_6, mutates_args=mutates_args)
    self.assertEqual(result, "(Scalar x) -> Scalar")

    def foo_op_7(x: typing.Union[int, bool, float]) -> types.Number:
        return x

    result = torch.library.infer_schema(foo_op_7, mutates_args=mutates_args)
    self.assertEqual(result, "(Scalar x) -> Scalar")
from __future__ import annotations import typing from typing import List, Optional, Sequence, Union # noqa: F401 import torch from torch import Tensor, types from torch.testing._internal.common_utils import run_tests, TestCase mutates_args = {} class TestInferSchemaWithAnnotation(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/custom_operator/test_infer_schema_annotation.py
test_no_library_prefix
def test_no_library_prefix(self):
    def foo_op(x: Tensor) -> Tensor:
        return x.clone()

    result = torch.library.infer_schema(foo_op, mutates_args=mutates_args)
    self.assertEqual(result, "(Tensor x) -> Tensor")

    def foo_op_2(x: Tensor) -> torch.Tensor:
        return x.clone()

    result = torch.library.infer_schema(foo_op_2, mutates_args=mutates_args)
    self.assertEqual(result, "(Tensor x) -> Tensor")

    def foo_op_3(x: torch.Tensor) -> Tensor:
        return x.clone()

    result = torch.library.infer_schema(foo_op_3, mutates_args=mutates_args)
    self.assertEqual(result, "(Tensor x) -> Tensor")

    def foo_op_4(x: List[int]) -> types.Number:
        return x[0]

    result = torch.library.infer_schema(foo_op_4, mutates_args=mutates_args)
    self.assertEqual(result, "(SymInt[] x) -> Scalar")

    def foo_op_5(x: Optional[int]) -> int:
        return 1

    result = torch.library.infer_schema(foo_op_5, mutates_args=mutates_args)
    self.assertEqual(result, "(SymInt? x) -> SymInt")

    def foo_op_6(x: Sequence[int]) -> int:
        return 1

    result = torch.library.infer_schema(foo_op_6, mutates_args=mutates_args)
    self.assertEqual(result, "(SymInt[] x) -> SymInt")

    def foo_op_7(x: List[int]) -> int:
        return 1

    result = torch.library.infer_schema(foo_op_7, mutates_args=mutates_args)
    self.assertEqual(result, "(SymInt[] x) -> SymInt")

    def foo_op_8(x: Optional[Sequence[int]]) -> int:
        return 1

    result = torch.library.infer_schema(foo_op_8, mutates_args=mutates_args)
    self.assertEqual(result, "(SymInt[]? x) -> SymInt")

    def foo_op_9(x: Optional[List[int]]) -> int:
        return 1

    result = torch.library.infer_schema(foo_op_9, mutates_args=mutates_args)
    self.assertEqual(result, "(SymInt[]? x) -> SymInt")

    def foo_op_10(x: Union[int, float, bool]) -> types.Number:
        return x

    result = torch.library.infer_schema(foo_op_10, mutates_args=mutates_args)
    self.assertEqual(result, "(Scalar x) -> Scalar")

    def foo_op_11(x: Union[int, bool, float]) -> types.Number:
        return x

    result = torch.library.infer_schema(foo_op_11, mutates_args=mutates_args)
    self.assertEqual(result, "(Scalar x) -> Scalar")
from __future__ import annotations import typing from typing import List, Optional, Sequence, Union # noqa: F401 import torch from torch import Tensor, types from torch.testing._internal.common_utils import run_tests, TestCase mutates_args = {} class TestInferSchemaWithAnnotation(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/custom_operator/test_infer_schema_annotation.py
test_unsupported_annotation
def test_unsupported_annotation(self):
    with self.assertRaisesRegex(
        ValueError,
        r"Unsupported type annotation D. It is not a type.",
    ):

        def foo_op(x: D) -> Tensor:  # noqa: F821
            return torch.Tensor(x)

        torch.library.infer_schema(foo_op, mutates_args=mutates_args)

    with self.assertRaisesRegex(
        ValueError,
        r"Unsupported type annotation E. It is not a type.",
    ):

        def foo_op_2(x: Tensor) -> E:  # noqa: F821
            return x

        torch.library.infer_schema(foo_op_2, mutates_args=mutates_args)
from __future__ import annotations import typing from typing import List, Optional, Sequence, Union # noqa: F401 import torch from torch import Tensor, types from torch.testing._internal.common_utils import run_tests, TestCase mutates_args = {} class TestInferSchemaWithAnnotation(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_autograd.py
test_unused_forward_output
def test_unused_forward_output(self):
    """
    Tests that gradients propagate when running a backward where some
    forward output is not used to compute the loss, motivated by:
    https://github.com/pytorch/pytorch/pull/83195
    """
    self.run_subtests(
        {"reshard_after_forward": [True, False, 2]},
        self._test_unused_forward_output,
    )
import collections import copy import functools import itertools import unittest from typing import Any, List, Optional, Type, Union import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable.fsdp import fully_shard from torch.nn.parallel.scatter_gather import _is_namedtuple from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, DoubleLinear, FSDPTest, FSDPTestMultiThread, MLP, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, ) class TestFullyShardAutograd(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_init.py
test_meta_device_2d_init
def test_meta_device_2d_init(self):
    assert self.world_size >= 4, f"{self.world_size}"
    dp_size = 2
    global_mesh = init_device_mesh(
        "cuda", (dp_size, self.world_size // dp_size), mesh_dim_names=("dp", "tp")
    )
    dp_mesh, tp_mesh = global_mesh["dp"], global_mesh["tp"]
    # Test both even sharding (8) and uneven sharding (3)
    for mlp_dim in (8, 3):
        with torch.device("meta"):
            model = MLP(mlp_dim, with_buffer=True)
        for param in model.parameters():
            self.assertEqual(param.device, torch.device("meta"))
        parallelize_module(
            model,
            tp_mesh,
            {"in_proj": ColwiseParallel(), "out_proj": RowwiseParallel()},
        )
        for param in model.parameters():
            self.assertEqual(param.device, torch.device("meta"))
        fully_shard(model.in_proj, mesh=dp_mesh)
        fully_shard(model.out_proj, mesh=dp_mesh)
        fully_shard(model, mesh=dp_mesh)
        for param in model.parameters():
            self.assertEqual(param.device, torch.device("meta"))
        self._test_to_empty_and_reset_parameters(model, global_mesh, mlp_dim)
import copy import itertools import unittest from typing import List import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable import replicate from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_init import ( _get_managed_modules, _get_managed_states, ) from torch.distributed._composable.fsdp._fsdp_param import ParamModuleInfo from torch.distributed._composable.fsdp._fsdp_param_group import _get_param_module_infos from torch.distributed._tensor import ( DeviceMesh, distribute_tensor, DTensor, Replicate, Shard, ) from torch.distributed.device_mesh import init_device_mesh from torch.distributed.fsdp._init_utils import ( _init_inter_node_process_group, _init_intra_node_process_group, ) from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, RowwiseParallel, ) from torch.distributed.tensor.placement_types import _StridedShard from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_fsdp import FSDPTestMultiThread, MLP from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) class TestFullyShardMetaDeviceInit(FSDPTestMultiThread):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_init.py
test_rank0_broadcast_meta_device_init
def test_rank0_broadcast_meta_device_init(self):
    model_args = ModelArgs(dropout_p=0.0)
    # Assume we have a CPU full state dict on rank 0
    if self.rank == 0:
        torch.manual_seed(42)
        ref_model = Transformer(model_args)
        full_sd = ref_model.state_dict()
        for param in full_sd.values():
            self.assertEqual(param.device, torch.device("cpu"))
    # Initialize the sharded model on meta device
    fsdp_mesh = init_device_mesh("cuda", (self.world_size,))
    with torch.device("meta"):
        model = Transformer(model_args)
    for module in model.modules():
        if isinstance(module, TransformerBlock):
            fully_shard(module, mesh=fsdp_mesh)
    fully_shard(model, mesh=fsdp_mesh)
    for param in model.parameters():
        self.assertEqual(param.device, torch.device("meta"))
    # Construct a sharded state dict from the rank 0 full state dict by
    # broadcasting and sharding
    meta_sharded_sd = model.state_dict()
    sharded_sd = {}
    if self.rank == 0:
        self.assertEqual(len(meta_sharded_sd), len(full_sd))
        self.assertEqual(list(meta_sharded_sd.keys()), list(full_sd.keys()))
        for (param_name, full_param), sharded_meta_param in zip(
            full_sd.items(), meta_sharded_sd.values()
        ):
            full_param = full_param.detach().cuda()
            mesh = sharded_meta_param.device_mesh
            dist.broadcast(full_param, src=0, group=mesh.get_group(0))
            sharded_tensor = distribute_tensor(
                full_param, mesh, sharded_meta_param.placements
            )
            sharded_sd[param_name] = nn.Parameter(sharded_tensor)
    else:
        for param_name, sharded_meta_param in meta_sharded_sd.items():
            full_tensor = torch.empty(
                sharded_meta_param.size(),
                device="cuda",
                dtype=sharded_meta_param.dtype,
            )
            mesh = sharded_meta_param.device_mesh
            dist.broadcast(full_tensor, src=0, group=mesh.get_group(0))
            sharded_tensor = distribute_tensor(
                full_tensor, mesh, sharded_meta_param.placements
            )
            sharded_sd[param_name] = nn.Parameter(sharded_tensor)
    model.load_state_dict(sharded_sd, assign=True)
    for param in model.parameters():
        self.assertIsInstance(param, DTensor)
        self.assertEqual(param.device.type, "cuda")
    # Construct the reference model on nonzero ranks by broadcasting the
    # unsharded model from rank 0 and sharding on all ranks
    if self.rank != 0:
        ref_model = Transformer(model_args)
    for param in ref_model.parameters():
        torch.distributed.broadcast(param.detach(), src=0)
    for module in ref_model.modules():
        if isinstance(module, TransformerBlock):
            fully_shard(module, mesh=fsdp_mesh)
    fully_shard(ref_model, mesh=fsdp_mesh)
    for (param_name, param), (ref_param_name, ref_param) in zip(
        model.named_parameters(), ref_model.named_parameters()
    ):
        self.assertEqual(param_name, ref_param_name)
        self.assertEqual(param, ref_param)
    # Check one forward/backward for parity
    inp = torch.randint(0, model_args.vocab_size, (2, 16), device="cuda")
    loss = model(inp).sum()
    loss.backward()
    ref_loss = ref_model(inp).sum()
    ref_loss.backward()
    self.assertEqual(loss, ref_loss)
    for param, ref_param in zip(model.parameters(), ref_model.parameters()):
        self.assertEqual(param.grad, ref_param.grad)
import copy import itertools import unittest from typing import List import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable import replicate from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_init import ( _get_managed_modules, _get_managed_states, ) from torch.distributed._composable.fsdp._fsdp_param import ParamModuleInfo from torch.distributed._composable.fsdp._fsdp_param_group import _get_param_module_infos from torch.distributed._tensor import ( DeviceMesh, distribute_tensor, DTensor, Replicate, Shard, ) from torch.distributed.device_mesh import init_device_mesh from torch.distributed.fsdp._init_utils import ( _init_inter_node_process_group, _init_intra_node_process_group, ) from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, RowwiseParallel, ) from torch.distributed.tensor.placement_types import _StridedShard from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_fsdp import FSDPTestMultiThread, MLP from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) class TestFullyShardMetaDeviceInit(FSDPTestMultiThread):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_init.py
test_2d_process_group_init
def test_2d_process_group_init(self):
    shard_mesh_dim_size = 2
    assert (
        self.world_size % shard_mesh_dim_size == 0
    ), f"Expects {self.world_size} to be divisible by {shard_mesh_dim_size}"
    replicate_mesh_dim_size = self.world_size // shard_mesh_dim_size
    mesh_dim_names = ("replicate", "shard")
    ref_mesh = init_device_mesh(
        "cuda",
        (replicate_mesh_dim_size, shard_mesh_dim_size),
        mesh_dim_names=mesh_dim_names,
    )
    # Use the global PG as the parent group (in practice, this could be a
    # subgroup of the global PG)
    dp_group = dist.distributed_c10d._get_default_group()
    dp_shard_group = _init_intra_node_process_group(shard_mesh_dim_size)
    dp_replicate_group = _init_inter_node_process_group(
        dp_group, replicate_mesh_dim_size
    )
    mesh_tensor = torch.tensor(
        dist.get_process_group_ranks(dp_group), dtype=torch.int
    ).view(replicate_mesh_dim_size, shard_mesh_dim_size)
    # Check the `from_group()` API for correctness
    mesh = DeviceMesh.from_group(
        [dp_replicate_group, dp_shard_group],
        "cuda",
        mesh_dim_names=mesh_dim_names,
        mesh=mesh_tensor,
    )
    self.assertEqual(mesh.mesh, ref_mesh.mesh)
    self.assertEqual(mesh._coordinate_on_dim, ref_mesh._coordinate_on_dim)
    for (tag, ranks, group_name), (ref_tag, ref_ranks, ref_group_name) in zip(
        mesh._dim_group_infos, ref_mesh._dim_group_infos
    ):
        # Since we manually constructed new subgroups, the test and ref
        # groups are not the same
        self.assertEqual(ranks, ref_ranks)
    for mesh_dim_name in mesh_dim_names:
        child_mesh = mesh[mesh_dim_name]
        ref_child_mesh = ref_mesh[mesh_dim_name]
        self.assertEqual(child_mesh, ref_child_mesh)
        child_ranks = dist.distributed_c10d.get_process_group_ranks(
            child_mesh.get_group()
        )
        ref_child_ranks = dist.distributed_c10d.get_process_group_ranks(
            ref_child_mesh.get_group()
        )
        self.assertEqual(child_ranks, ref_child_ranks)
    # Check HSDP forward/backward parity
    torch.manual_seed(42)
    mlp_dim = 8
    ref_model = MLP(mlp_dim)
    for param in ref_model.parameters():
        dist.broadcast(param.detach(), src=0)
    model = copy.deepcopy(ref_model)
    # Parallelize the test model with the ref mesh
    for module in (ref_model.in_proj, ref_model.out_proj, ref_model):
        fully_shard(module, mesh=ref_mesh)
    # Parallelize the test model with the new mesh from the PG
    for module in (model.in_proj, model.out_proj, model):
        fully_shard(module, mesh=mesh)
    inp = torch.randn((4, mlp_dim), device="cuda")
    ref_loss = ref_model(inp).sum()
    ref_loss.backward()
    loss = model(inp).sum()
    loss.backward()
    self.assertEqual(loss, ref_loss)
    for param, ref_param in zip(model.parameters(), ref_model.parameters()):
        self.assertEqual(param, ref_param)
        self.assertEqual(param.grad, ref_param.grad)
import copy import itertools import unittest from typing import List import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable import replicate from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_init import ( _get_managed_modules, _get_managed_states, ) from torch.distributed._composable.fsdp._fsdp_param import ParamModuleInfo from torch.distributed._composable.fsdp._fsdp_param_group import _get_param_module_infos from torch.distributed._tensor import ( DeviceMesh, distribute_tensor, DTensor, Replicate, Shard, ) from torch.distributed.device_mesh import init_device_mesh from torch.distributed.fsdp._init_utils import ( _init_inter_node_process_group, _init_intra_node_process_group, ) from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, RowwiseParallel, ) from torch.distributed.tensor.placement_types import _StridedShard from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_fsdp import FSDPTestMultiThread, MLP from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) class TestFullyShardProcessGroupInit(FSDPTestMultiThread):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_init.py
test_hsdp_broadcast_across_replicas
def test_hsdp_broadcast_across_replicas(self):
    shard_size, replicate_size = 2, 2
    mesh = init_device_mesh(
        "cuda", (replicate_size, shard_size), mesh_dim_names=("replicate", "shard")
    )
    model_args = ModelArgs()
    model = Transformer(model_args)
    # Add a buffer to show that this flow works for buffers too
    model.buf = torch.nn.Buffer(torch.randn((model_args.dim,)))
    for module in model.modules():
        if isinstance(module, TransformerBlock):
            fully_shard(module, mesh=mesh)
    fully_shard(model, mesh=mesh)
    # Only preserve the model states on the replicate mesh's rank 0
    if mesh.get_local_rank("replicate") > 0:
        for tensor in itertools.chain(model.parameters(), model.buffers()):
            tensor.detach().fill_(1337)
    # Check that replicas are different
    for tensor in itertools.chain(model.parameters(), model.buffers()):
        local_tensor = tensor.to_local() if isinstance(tensor, DTensor) else tensor
        local_tensor_list = [
            torch.empty_like(local_tensor) for _ in range(mesh["replicate"].size())
        ]
        dist.all_gather(
            local_tensor_list, local_tensor, group=mesh.get_group("replicate")
        )
        for other_local_tensor in local_tensor_list[1:]:
            self.assertEqual(other_local_tensor.shape, local_tensor_list[0].shape)
            self.assertNotEqual(other_local_tensor, local_tensor_list[0])
    # Broadcast from replicate mesh's rank 0
    replicate_group = mesh.get_group("replicate")
    for tensor in itertools.chain(model.parameters(), model.buffers()):
        # E.g. for mesh [[0, 1, 2, 3], [4, 5, 6, 7]] sharding on dim-1 and
        # replicating on dim-0, broadcast with sources 0, 1, 2, 3
        src_rank = dist.get_process_group_ranks(replicate_group)[0]
        torch.distributed.broadcast(
            tensor.to_local() if isinstance(tensor, DTensor) else tensor,
            src=src_rank,
            group=replicate_group,
        )
    # Check that replicas are the same
    for tensor in itertools.chain(model.parameters(), model.buffers()):
        local_tensor = tensor.to_local() if isinstance(tensor, DTensor) else tensor
        local_tensor_list = [
            torch.empty_like(local_tensor) for _ in range(mesh["replicate"].size())
        ]
        dist.all_gather(
            local_tensor_list, local_tensor, group=mesh.get_group("replicate")
        )
        for other_local_tensor in local_tensor_list[1:]:
            self.assertEqual(other_local_tensor, local_tensor_list[0])
    # Check that we can run an iteration without erroring
    inp = torch.randint(0, model_args.vocab_size, (2, 16), device="cuda")
    model(inp).sum().backward()
import copy import itertools import unittest from typing import List import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable import replicate from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_init import ( _get_managed_modules, _get_managed_states, ) from torch.distributed._composable.fsdp._fsdp_param import ParamModuleInfo from torch.distributed._composable.fsdp._fsdp_param_group import _get_param_module_infos from torch.distributed._tensor import ( DeviceMesh, distribute_tensor, DTensor, Replicate, Shard, ) from torch.distributed.device_mesh import init_device_mesh from torch.distributed.fsdp._init_utils import ( _init_inter_node_process_group, _init_intra_node_process_group, ) from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, RowwiseParallel, ) from torch.distributed.tensor.placement_types import _StridedShard from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_fsdp import FSDPTestMultiThread, MLP from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) class TestFullyShardHSDPBroadcast(FSDPTestMultiThread):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_memory.py
test_fully_shard_training_memory
def test_fully_shard_training_memory(self):
    self.run_subtests(
        {
            "reshard_after_forward": [True, False],
            "use_cpu_offload": [True, False],
            "run_optim_in_backward": [True, False],
        },
        self._test_fully_shard_training_memory,
    )
import functools import torch from torch.distributed._composable.fsdp import ( CPUOffloadPolicy, fully_shard, OffloadPolicy, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) class TestFullyShardMemory(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_mixed_precision.py
test_compute_dtype
def test_compute_dtype(self):
    self.run_subtests(
        {
            "param_dtype": [torch.bfloat16, torch.float16],
            "reshard_after_forward": [False, True, 2],
        },
        self._test_compute_dtype,
    )
import copy import functools from typing import Dict, List, Optional, Union import torch import torch.distributed as dist import torch.distributed._functional_collectives as funcol import torch.nn as nn from torch.distributed._composable.fsdp import fully_shard, MixedPrecisionPolicy from torch.distributed._composable.fsdp._fsdp_collectives import ( _get_gradient_divide_factors, ) from torch.testing._internal.common_distributed import ( requires_nccl_version, SaveForwardInputsModel, skip_if_lt_x_gpu, ) from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, FSDPTestMultiThread, MLP, patch_reduce_scatter, reduce_scatter_with_assert, ) from torch.testing._internal.common_utils import run_tests class TestFullyShardMixedPrecisionTraining(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_mixed_precision.py
assert_fn
def assert_fn(output: torch.Tensor):
    self.assertEqual(output.dtype, param_dtype)

reduce_scatter = functools.partial(
    reduce_scatter_with_assert, self, orig_reduce_scatter, assert_fn
)
predivide_factor, postdivide_factor = _get_gradient_divide_factors(
    self.process_group, all_reduce_group=None, reduce_dtype=param_dtype
)
torch.manual_seed(42 + self.rank + 1)
inp = torch.randn((4, 16), device="cuda", dtype=param_dtype)
for iter_idx in range(10):
    optim.zero_grad(set_to_none=(iter_idx % 2 == 0))
    fsdp_loss = model(inp).sum()
    with patch_reduce_scatter(reduce_scatter):
        fsdp_loss.backward()
    optim.step()
    ref_optim.zero_grad(set_to_none=(iter_idx % 2 == 0))
    ref_loss = ref_model_bf16(inp.to(param_dtype)).sum()
    ref_loss.backward()
    for param in ref_model_bf16.parameters():
        # Use reduce-scatter -> all-gather as all-reduce because for
        # world size >=4, NCCL all-reduce shows numeric differences
        # compared with NCCL reduce-scatter
        if predivide_factor is not None and predivide_factor > 1:
            param.grad.div_(predivide_factor)
        elif predivide_factor is None:
            param.grad.div_(self.world_size)
        output = torch.zeros_like(torch.chunk(param.grad, self.world_size)[0])
        dist.reduce_scatter_tensor(output, param.grad)
        dist.all_gather_into_tensor(param.grad, output)
        if postdivide_factor is not None and postdivide_factor > 1:
            param.grad.div_(postdivide_factor)
    for param_fp32, param_bf16 in zip(
        ref_model.parameters(), ref_model_bf16.parameters()
    ):
        param_fp32.grad = param_bf16.grad.to(param_fp32.dtype)
        param_bf16.grad = None
    ref_optim.step()  # fp32 optimizer step
    for param_fp32, param_bf16 in zip(
        ref_model.parameters(), ref_model_bf16.parameters()
    ):
        param_bf16.detach().copy_(param_fp32)
    self.assertEqual(fsdp_loss, ref_loss)
    check_sharded_parity(self, ref_model, model)
import copy import functools from typing import Dict, List, Optional, Union import torch import torch.distributed as dist import torch.distributed._functional_collectives as funcol import torch.nn as nn from torch.distributed._composable.fsdp import fully_shard, MixedPrecisionPolicy from torch.distributed._composable.fsdp._fsdp_collectives import ( _get_gradient_divide_factors, ) from torch.testing._internal.common_distributed import ( requires_nccl_version, SaveForwardInputsModel, skip_if_lt_x_gpu, ) from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, FSDPTestMultiThread, MLP, patch_reduce_scatter, reduce_scatter_with_assert, ) from torch.testing._internal.common_utils import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_autograd.py
test_post_acc_grad_hook_runs
def test_post_acc_grad_hook_runs(self):
    param_name_to_hook_count = collections.defaultdict(int)

    def hook(param_name: str, param: torch.Tensor) -> None:
        nonlocal param_name_to_hook_count
        param_name_to_hook_count[param_name] += 1

    model = MLP(8)
    for module in (model.in_proj, model.out_proj, model):
        fully_shard(module)
    for param_name, param in model.named_parameters():
        param_hook = functools.partial(hook, param_name)
        param.register_post_accumulate_grad_hook(param_hook)
    inp = torch.randn((2, 8), device="cuda")
    model(inp).sum().backward()
    param_names = {param_name for param_name, _ in model.named_parameters()}
    self.assertEqual(param_names, set(param_name_to_hook_count.keys()))
    for param_name, count in param_name_to_hook_count.items():
        self.assertEqual(count, 1)
import collections import copy import functools import itertools import unittest from typing import Any, List, Optional, Type, Union import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable.fsdp import fully_shard from torch.nn.parallel.scatter_gather import _is_namedtuple from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, DoubleLinear, FSDPTest, FSDPTestMultiThread, MLP, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, ) class TestFullyShardPostAccGradHookMultiThread(FSDPTestMultiThread):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_autograd.py
test_post_acc_grad_hook_optim_parity
def test_post_acc_grad_hook_optim_parity(self):
    """
    Tests parity of running the optimizer via the post-accumulate-grad
    hook vs. normally.
    """
    torch.manual_seed(42)
    model_args = ModelArgs(dropout_p=0.0)
    model = Transformer(model_args)
    ref_model = copy.deepcopy(model).cuda()
    for module in itertools.chain(ref_model.layers, [ref_model]):
        fully_shard(module)
    optim_kwargs = {"lr": 1e-2, "foreach": False}
    ref_optim = torch.optim.AdamW(ref_model.parameters(), **optim_kwargs)
    lr_scheduler_kwargs = {"step_size": 5}
    ref_lr_scheduler = torch.optim.lr_scheduler.StepLR(
        ref_optim, **lr_scheduler_kwargs
    )
    for module in itertools.chain(model.layers, [model]):
        fully_shard(module)
    param_to_optim = {}
    param_to_lr_scheduler = {}
    for param in model.parameters():
        param_to_optim[param] = torch.optim.AdamW([param], **optim_kwargs)
        param_to_lr_scheduler[param] = torch.optim.lr_scheduler.StepLR(
            param_to_optim[param], **lr_scheduler_kwargs
        )

    def optim_hook(param: nn.Parameter) -> None:
        param_to_optim[param].step()
        param_to_optim[param].zero_grad()
        param_to_lr_scheduler[param].step()

    for param in model.parameters():
        param.register_post_accumulate_grad_hook(optim_hook)
    torch.manual_seed(42 + self.rank)
    inp = torch.randint(0, model_args.vocab_size, (2, 16), device="cuda")
    for _ in range(10):
        ref_loss = ref_model(inp).sum()
        ref_loss.backward()
        ref_optim.step()
        ref_optim.zero_grad()
        ref_lr_scheduler.step()
        loss = model(inp).sum()
        loss.backward()
        self.assertTrue(torch.equal(ref_loss, loss))
        for ref_param, param in zip(ref_model.parameters(), model.parameters()):
            self.assertTrue(torch.equal(ref_param, param))
import collections import copy import functools import itertools import unittest from typing import Any, List, Optional, Type, Union import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable.fsdp import fully_shard from torch.nn.parallel.scatter_gather import _is_namedtuple from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, DoubleLinear, FSDPTest, FSDPTestMultiThread, MLP, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, ) class TestFullyShardPostAccGradHookMultiProcess(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_clip_grad_norm_.py
test_clip_grad_norm_1d
def test_clip_grad_norm_1d(self):
    for norm_type in (2, 1, float("inf")):
        torch.manual_seed(42)
        model_args = ModelArgs(dropout_p=0.0)
        model = Transformer(model_args)
        ref_model = replicate(copy.deepcopy(model).cuda())
        ref_optim = torch.optim.Adam(ref_model.parameters(), lr=1e-2)
        for module in model.modules():
            if isinstance(module, TransformerBlock):
                fully_shard(module)
        fully_shard(model)
        optim = torch.optim.Adam(model.parameters(), lr=1e-2)
        inp = torch.randint(0, model.model_args.vocab_size, (3, 16), device="cuda")
        self._test_clip_grad_norm(
            1, norm_type, ref_model, ref_optim, model, optim, inp
        )
import copy import functools from typing import Optional, Union import torch import torch.nn as nn from torch.distributed._composable import replicate from torch.distributed._composable.fsdp import fully_shard from torch.distributed.device_mesh import DeviceMesh, init_device_mesh from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, MLPStack from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) class TestClipGradNormWorldSize2(_TestClipGradNormBase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_clip_grad_norm_.py
test_clip_grad_norm_2d
def test_clip_grad_norm_2d(self):
    for norm_type in (2, 1, 3, float("inf")):
        dp_size = 2
        global_mesh = init_device_mesh(
            "cuda",
            (dp_size, self.world_size // dp_size),
            mesh_dim_names=("dp", "tp"),
        )
        dp_mesh, tp_mesh = global_mesh["dp"], global_mesh["tp"]
        torch.manual_seed(42)
        # Test using an MLP stack, not a transformer, since the transformer
        # has some more significant numeric differences from the TP
        model = MLPStack(16, with_seq_parallel=True)
        ref_model = replicate(
            copy.deepcopy(model).cuda(), process_group=dp_mesh.get_group()
        )
        ref_optim = torch.optim.Adam(ref_model.parameters(), lr=1e-2)
        model.parallelize(
            tp_mesh,
            dp_mesh,
            use_activation_checkpointing=False,
            reshard_after_forward=True,
        )
        optim = torch.optim.Adam(model.parameters(), lr=1e-2)
        inp = torch.randn(2, 16, device="cuda")
        self._test_clip_grad_norm(
            0.5, norm_type, ref_model, ref_optim, model, optim, inp, dp_mesh
        )
import copy import functools from typing import Optional, Union import torch import torch.nn as nn from torch.distributed._composable import replicate from torch.distributed._composable.fsdp import fully_shard from torch.distributed.device_mesh import DeviceMesh, init_device_mesh from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, MLPStack from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) class TestClipGradNormWorldSize4(_TestClipGradNormBase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_comm.py
test_all_gather_fp32
def test_all_gather_fp32(self):
    param_sizes = self._get_param_sizes()
    default_stream = torch.cuda.current_stream()
    stream1, stream2 = torch.cuda.Stream(), torch.cuda.Stream()
    for async_op, streams, reshard_after_forward in itertools.product(
        (False, True),
        ((default_stream, default_stream), (stream1, stream2)),
        (True, 8),
    ):
        all_gather_copy_in_stream, all_gather_stream = streams
        # Save test time by only testing reshard after forward as an int
        # for non-async and non-default streams (like in pre-backward)
        if type(reshard_after_forward) is int and (
            async_op or all_gather_stream is default_stream
        ):
            continue
        self._test_all_gather(
            param_sizes,
            reshard_after_forward=reshard_after_forward,
            async_op=async_op,
            all_gather_copy_in_stream=all_gather_copy_in_stream,
            all_gather_stream=all_gather_stream,
        )
import copy import functools import itertools import unittest from typing import Callable, List, Optional, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import ( FSDPModule, fully_shard, MixedPrecisionPolicy, OffloadPolicy, ) from torch.distributed._composable.fsdp._fsdp_collectives import ( _div_if_needed, _get_gradient_divide_factors, foreach_all_gather, foreach_all_gather_copy_out, foreach_reduce, ) from torch.distributed._composable.fsdp._fsdp_common import FSDPMeshInfo, TrainingState from torch.distributed._composable.fsdp._fsdp_init import ( _get_post_forward_mesh_info, _init_default_fully_shard_mesh, ) from torch.distributed._composable.fsdp._fsdp_param import ShardedState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import DTensor from torch.distributed._tensor.experimental import implicit_replication from torch.distributed.device_mesh import DeviceMesh, init_device_mesh from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, DoubleLinear, FSDPTest, FSDPTestMultiThread, MLP, patch_post_backward, patch_reshard, patch_unshard, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) c10d_ops = torch.ops.c10d EventType = Tuple[str, str, TrainingState] class TestFullyShardCollectiveOps(FSDPTestMultiThread):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_comm.py
all_gather
def all_gather(fsdp_param_group: FSDPParamGroup, group: dist.ProcessGroup):
    all_gather_result = foreach_all_gather(
        fsdp_param_group.fsdp_params,
        group,
        async_op=async_op,
        all_gather_copy_in_stream=all_gather_copy_in_stream,
        all_gather_stream=all_gather_stream,
        device=self.device,
    )
    foreach_all_gather_copy_out(all_gather_result, fsdp_params, group)
    # Transition to unsharded state to register unsharded parameters
    for fsdp_param in fsdp_param_group.fsdp_params:
        fsdp_param.init_unsharded_param()
    fsdp_param_group._to_unsharded()
import copy import functools import itertools import unittest from typing import Callable, List, Optional, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import ( FSDPModule, fully_shard, MixedPrecisionPolicy, OffloadPolicy, ) from torch.distributed._composable.fsdp._fsdp_collectives import ( _div_if_needed, _get_gradient_divide_factors, foreach_all_gather, foreach_all_gather_copy_out, foreach_reduce, ) from torch.distributed._composable.fsdp._fsdp_common import FSDPMeshInfo, TrainingState from torch.distributed._composable.fsdp._fsdp_init import ( _get_post_forward_mesh_info, _init_default_fully_shard_mesh, ) from torch.distributed._composable.fsdp._fsdp_param import ShardedState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import DTensor from torch.distributed._tensor.experimental import implicit_replication from torch.distributed.device_mesh import DeviceMesh, init_device_mesh from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, DoubleLinear, FSDPTest, FSDPTestMultiThread, MLP, patch_post_backward, patch_reshard, patch_unshard, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) c10d_ops = torch.ops.c10d EventType = Tuple[str, str, TrainingState]
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_comm.py
test_reduce_scatter_fp16
def test_reduce_scatter_fp16(self):
    param_sizes = self._get_param_sizes()
    default_stream = torch.cuda.current_stream()
    stream = torch.cuda.Stream()
    for reduce_scatter_stream in (default_stream, stream):
        self._test_reduce_scatter(
            param_sizes,
            reduce_scatter_stream=reduce_scatter_stream,
            reduce_scatter_dtype=torch.float16,
        )
import copy import functools import itertools import unittest from typing import Callable, List, Optional, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import ( FSDPModule, fully_shard, MixedPrecisionPolicy, OffloadPolicy, ) from torch.distributed._composable.fsdp._fsdp_collectives import ( _div_if_needed, _get_gradient_divide_factors, foreach_all_gather, foreach_all_gather_copy_out, foreach_reduce, ) from torch.distributed._composable.fsdp._fsdp_common import FSDPMeshInfo, TrainingState from torch.distributed._composable.fsdp._fsdp_init import ( _get_post_forward_mesh_info, _init_default_fully_shard_mesh, ) from torch.distributed._composable.fsdp._fsdp_param import ShardedState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import DTensor from torch.distributed._tensor.experimental import implicit_replication from torch.distributed.device_mesh import DeviceMesh, init_device_mesh from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, DoubleLinear, FSDPTest, FSDPTestMultiThread, MLP, patch_post_backward, patch_reshard, patch_unshard, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) c10d_ops = torch.ops.c10d EventType = Tuple[str, str, TrainingState] class TestFullyShardCollectiveOps(FSDPTestMultiThread):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_comm.py
test_fully_shard_communication_count
def test_fully_shard_communication_count(self):
    """
    Tests that FSDP issues the expected number of all-gathers and
    reduce-scatters during forward and backward.
    """
    self.run_subtests(
        {"reshard_after_forward": [True, False, 2]},
        self._test_communication_count,
    )
import copy import functools import itertools import unittest from typing import Callable, List, Optional, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import ( FSDPModule, fully_shard, MixedPrecisionPolicy, OffloadPolicy, ) from torch.distributed._composable.fsdp._fsdp_collectives import ( _div_if_needed, _get_gradient_divide_factors, foreach_all_gather, foreach_all_gather_copy_out, foreach_reduce, ) from torch.distributed._composable.fsdp._fsdp_common import FSDPMeshInfo, TrainingState from torch.distributed._composable.fsdp._fsdp_init import ( _get_post_forward_mesh_info, _init_default_fully_shard_mesh, ) from torch.distributed._composable.fsdp._fsdp_param import ShardedState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import DTensor from torch.distributed._tensor.experimental import implicit_replication from torch.distributed.device_mesh import DeviceMesh, init_device_mesh from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, DoubleLinear, FSDPTest, FSDPTestMultiThread, MLP, patch_post_backward, patch_reshard, patch_unshard, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) c10d_ops = torch.ops.c10d EventType = Tuple[str, str, TrainingState] class TestFullyShardCommunication(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
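The docstring above only states that FSDP should issue the expected number of all-gathers and reduce-scatters; the subtest that does the counting is not included in this row. As a rough illustration, here is a minimal sketch of how such collectives can be counted with CommDebugMode (which this test file already imports); it assumes that CommDebugMode.get_comm_counts() returns a mapping keyed by the c10d collective ops, and `model`/`inp` are placeholder names for an FSDP2-wrapped model and a matching input.

from torch.distributed.tensor.debug import CommDebugMode

c10d_ops = torch.ops.c10d
with CommDebugMode() as comm_mode:
    # One forward/backward under the comm-counting mode
    model(inp).sum().backward()
counts = comm_mode.get_comm_counts()
num_all_gathers = counts[c10d_ops._allgather_base_]
num_reduce_scatters = counts[c10d_ops._reduce_scatter_base_]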
torch
test/distributed/_composable/fsdp/test_fully_shard_compile.py
inductor_code_check_no_compute_op
def inductor_code_check_no_compute_op(self, file_check):
    return (
        file_check.check_not(" = aten.")
        .check_not(" = extern_kernels.")
        .check_not(" = triton_")
        .check_not(" = torch.ops.")
        .check_not(" = inductor_ops.")
        .check_not(" aten.")
        .check_not(" extern_kernels.")
        .check_not(" triton_")
        .check_not(" torch.ops.")
        .check_not(" inductor_ops.")
    )
import contextlib import copy import functools import unittest from unittest import mock import torch import torch._dynamo.testing import torch.distributed._composable.fsdp._fsdp_param import torch.nn.functional as F from torch import nn from torch._dynamo import compiled_autograd from torch._inductor import comms from torch._inductor.utils import is_fallback_op, run_and_get_code from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_common import TrainingState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import init_device_mesh from torch.testing import FileCheck from torch.testing._internal.common_distributed import at_least_x_gpu, skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, MLP from torch.testing._internal.common_utils import run_tests, skipIfRocm from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, ) from torch.utils._triton import has_triton class TestFullyShardCompile(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
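The helper above only chains check_not patterns onto a FileCheck object; how the generated Inductor code is obtained and checked is not shown in this row. A hedged sketch of how such a checker is typically driven, assuming run_and_get_code (imported in this file's context) returns the compiled function's output together with the generated code strings, and `compiled_fn`/`inputs` are placeholder names:

# Hypothetical usage sketch, not the exact driver code from this test file
_, codes = run_and_get_code(compiled_fn, *inputs)  # codes: list of generated source strings
file_check = FileCheck()
file_check = self.inductor_code_check_no_compute_op(file_check)
file_check.run(codes[0])  # fails if any compute op appears in the checked code region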
torch
test/distributed/_composable/fsdp/test_fully_shard_compile.py
_fn
def _fn(gm):
    # fullgraph=True because graph-break in Compiled Autograd BWD graph is not supported
    # by Traceable FSDP2 yet (main difficulty comes from queue_callback not working well
    # when BWD has graph break).
    return torch.compile(
        gm, backend=compiled_autograd_backend, fullgraph=True
    )

# `compiled_autograd_backend` and this trailing return belong to the enclosing
# compiler-factory helper (referenced as `compiler_fn` elsewhere in this file)
return _fn
import contextlib import copy import functools import unittest from unittest import mock import torch import torch._dynamo.testing import torch.distributed._composable.fsdp._fsdp_param import torch.nn.functional as F from torch import nn from torch._dynamo import compiled_autograd from torch._inductor import comms from torch._inductor.utils import is_fallback_op, run_and_get_code from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_common import TrainingState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import init_device_mesh from torch.testing import FileCheck from torch.testing._internal.common_distributed import at_least_x_gpu, skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, MLP from torch.testing._internal.common_utils import run_tests, skipIfRocm from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, ) from torch.utils._triton import has_triton
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_compile.py
run_iters
def run_iters(model, optim, n_iter=10, compiled_autograd_backend=None):
    torch.manual_seed(42)
    losses = []
    for i in range(n_iter):
        inp = input_creation_fn()
        if compiled_autograd_backend is not None:
            maybe_compiled_autograd_ctx = compiled_autograd.enable(
                compiler_fn(compiled_autograd_backend)
            )
        else:
            maybe_compiled_autograd_ctx = contextlib.nullcontext()
        with maybe_compiled_autograd_ctx:
            out = model(inp)
            loss = out.sum()
            losses.append(loss.item())
            loss.backward()
        optim.step()
        optim.zero_grad(set_to_none=True)
    return losses
import contextlib import copy import functools import unittest from unittest import mock import torch import torch._dynamo.testing import torch.distributed._composable.fsdp._fsdp_param import torch.nn.functional as F from torch import nn from torch._dynamo import compiled_autograd from torch._inductor import comms from torch._inductor.utils import is_fallback_op, run_and_get_code from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_common import TrainingState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import init_device_mesh from torch.testing import FileCheck from torch.testing._internal.common_distributed import at_least_x_gpu, skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, MLP from torch.testing._internal.common_utils import run_tests, skipIfRocm from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, ) from torch.utils._triton import has_triton
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_compile.py
test_compiled
def test_compiled():
    model, optim = model_init_fn()
    # FSDP2 does lazy init using 1st run, so run it once to init using eager mode
    run_iters(model, optim, n_iter=1)

    model_compiled = torch.compile(model, backend=backend, fullgraph=fullgraph)
    res = run_iters(model_compiled, optim, compiled_autograd_backend=backend)
    return res
import contextlib import copy import functools import unittest from unittest import mock import torch import torch._dynamo.testing import torch.distributed._composable.fsdp._fsdp_param import torch.nn.functional as F from torch import nn from torch._dynamo import compiled_autograd from torch._inductor import comms from torch._inductor.utils import is_fallback_op, run_and_get_code from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_common import TrainingState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import init_device_mesh from torch.testing import FileCheck from torch.testing._internal.common_distributed import at_least_x_gpu, skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, MLP from torch.testing._internal.common_utils import run_tests, skipIfRocm from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, ) from torch.utils._triton import has_triton
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_compile.py
test_eager
def test_eager():
    model, optim = model_init_fn()
    # FSDP2 does lazy init using 1st run, so run it once to init using eager mode
    run_iters(model, optim, n_iter=1)

    res = run_iters(model, optim)
    return res

losses_compiled = test_compiled()
losses_eager = test_eager()
if not self.fake_pg:
    for loss_compiled, loss_eager in zip(losses_compiled, losses_eager):
        self.assertTrue(
            torch.allclose(
                torch.tensor(loss_compiled),
                torch.tensor(loss_eager),
                rtol=1e-5,
                atol=1e-8,
            ),
            f"{loss_compiled} vs {loss_eager}",
        )
import contextlib import copy import functools import unittest from unittest import mock import torch import torch._dynamo.testing import torch.distributed._composable.fsdp._fsdp_param import torch.nn.functional as F from torch import nn from torch._dynamo import compiled_autograd from torch._inductor import comms from torch._inductor.utils import is_fallback_op, run_and_get_code from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_common import TrainingState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import init_device_mesh from torch.testing import FileCheck from torch.testing._internal.common_distributed import at_least_x_gpu, skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, MLP from torch.testing._internal.common_utils import run_tests, skipIfRocm from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, ) from torch.utils._triton import has_triton
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_compile.py
model_init_fn
def model_init_fn():
    torch.manual_seed(self.rank)
    fsdp_config = {}
    model = nn.Sequential(
        nn.Linear(hidden_dim, hidden_dim, device="cuda"),
        nn.ReLU(),
        nn.Linear(hidden_dim, hidden_dim, device="cuda"),
        nn.ReLU(),
        nn.Linear(hidden_dim, hidden_dim, device="cuda"),
    )
    fully_shard(model, reshard_after_forward=True, **fsdp_config)
    optim = torch.optim.SGD(model.parameters(), lr=1e-4)
    return model, optim
import contextlib import copy import functools import unittest from unittest import mock import torch import torch._dynamo.testing import torch.distributed._composable.fsdp._fsdp_param import torch.nn.functional as F from torch import nn from torch._dynamo import compiled_autograd from torch._inductor import comms from torch._inductor.utils import is_fallback_op, run_and_get_code from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_common import TrainingState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import init_device_mesh from torch.testing import FileCheck from torch.testing._internal.common_distributed import at_least_x_gpu, skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, MLP from torch.testing._internal.common_utils import run_tests, skipIfRocm from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, ) from torch.utils._triton import has_triton
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_compile.py
input_creation_fn
def input_creation_fn():
    torch.manual_seed(self.rank)
    inp = torch.randn((2, hidden_dim), device="cuda", requires_grad=False)
    return inp

return model_init_fn, input_creation_fn
import contextlib import copy import functools import unittest from unittest import mock import torch import torch._dynamo.testing import torch.distributed._composable.fsdp._fsdp_param import torch.nn.functional as F from torch import nn from torch._dynamo import compiled_autograd from torch._inductor import comms from torch._inductor.utils import is_fallback_op, run_and_get_code from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_common import TrainingState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import init_device_mesh from torch.testing import FileCheck from torch.testing._internal.common_distributed import at_least_x_gpu, skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, MLP from torch.testing._internal.common_utils import run_tests, skipIfRocm from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, ) from torch.utils._triton import has_triton
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_autograd.py
test_unused_forward_module
def test_unused_forward_module(self):
    """
    Tests that gradients propagate when running a backward where some
    forward module is not used to compute the loss, motivated by:
    https://github.com/pytorch/pytorch/pull/80245
    """
    self.run_subtests(
        {"reshard_after_forward": [True, False, 2]},
        self._test_unused_forward_module,
    )
import collections import copy import functools import itertools import unittest from typing import Any, List, Optional, Type, Union import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable.fsdp import fully_shard from torch.nn.parallel.scatter_gather import _is_namedtuple from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, DoubleLinear, FSDPTest, FSDPTestMultiThread, MLP, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, ) class TestFullyShardAutograd(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
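The subtest body is not included in this row; the docstring refers to the case where a module runs in forward but its output never reaches the loss. A hypothetical minimal sketch of that situation (names and sizes are illustrative only, not the model the test actually builds):

class ModelWithUnusedBranch(nn.Module):
    # Hypothetical illustration: `unused` runs in forward, but its output is
    # discarded, so it contributes nothing to the loss or to gradients.
    def __init__(self, dim: int):
        super().__init__()
        self.used = nn.Linear(dim, dim)
        self.unused = nn.Linear(dim, dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        self.unused(x)  # executed but not part of the returned activation
        return self.used(x)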
torch
test/distributed/_composable/fsdp/test_fully_shard_autograd.py
test_nontensor_activations
def test_nontensor_activations(self):
    """
    Tests that gradients propagate when running forward with nontensor
    data structures wrapping the activations. This is mainly to test the
    hook registration.
    """
    self.run_subtests(
        {"container_type": [list, collections.namedtuple, tuple, dict]},
        self._test_nontensor_activations,
    )
import collections import copy import functools import itertools import unittest from typing import Any, List, Optional, Type, Union import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable.fsdp import fully_shard from torch.nn.parallel.scatter_gather import _is_namedtuple from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, DoubleLinear, FSDPTest, FSDPTestMultiThread, MLP, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, ) class TestFullyShardAutograd(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_autograd.py
_test_nontensor_activations
def _test_nontensor_activations(self, container_type: Type):
    class Module(nn.Module):
        def __init__(self, dim: int):
            super().__init__()
            self.lin1 = nn.Linear(dim, dim)
            self.lin2 = nn.Linear(dim, dim)
            self.relu = nn.ReLU()

        def forward(self, inp: Any):
            # Assume that the "0th" element of `inp` is a tensor, run some
            # forward computation on it, and pack it back into the same
            # data structure type as `inp`
            if isinstance(inp, list):
                return [self._forward(inp[0])]
            elif _is_namedtuple(inp):
                return type(inp)(*([self._forward(inp[0])] + list(inp[1:])))
            elif isinstance(inp, tuple):
                return (self._forward(inp[0]),)
            elif isinstance(inp, dict):
                return {"x": self._forward(inp["x"])}
            else:
                raise NotImplementedError(
                    f"Unsupported input type {type(inp)}: {inp}"
                )

        def _forward(self, x: torch.Tensor) -> torch.Tensor:
            return self.relu(self.lin2(self.relu(self.lin1(x))))

    class ToContainerType(nn.Module):
        def __init__(self, container_type: Type):
            super().__init__()
            self.container_type = container_type

        def forward(self, x: torch.Tensor):
            if self.container_type is list:
                return [x]
            elif self.container_type is collections.namedtuple:
                nt = collections.namedtuple("NT", "x y")
                return nt(x, torch.ones_like(x))
            elif self.container_type is tuple:
                return (x,)
            elif self.container_type is dict:
                return {"x": x}
            else:
                raise NotImplementedError(
                    f"Unsupported container type: {self.container_type}"
                )

    class FromContainerType(nn.Module):
        def __init__(self, container_type: Type):
            super().__init__()
            self.container_type = container_type

        def forward(self, x: torch.Tensor):
            if self.container_type in (list, collections.namedtuple, tuple):
                return x[0]
            elif self.container_type is dict:
                return x["x"]
            else:
                raise NotImplementedError(
                    f"Unsupported container type: {self.container_type}"
                )

    torch.manual_seed(42)
    local_batch_size, dim = (2, 24)
    global_batch_size = self.world_size * local_batch_size
    model = nn.Sequential(
        ToContainerType(container_type),
        Module(dim),
        Module(dim),
        Module(dim),
        FromContainerType(container_type),
    )
    ref_model = copy.deepcopy(model).cuda()
    for module in model:
        fully_shard(module)
    fully_shard(model)
    ref_optim = torch.optim.Adam(ref_model.parameters(), lr=1e-2)
    optim = torch.optim.Adam(model.parameters(), lr=1e-2)
    torch.manual_seed(1)  # same on all ranks
    for iter_idx in range(10):
        global_inp = torch.rand((global_batch_size, dim), device="cuda")
        local_inp = global_inp[
            self.rank * local_batch_size : (self.rank + 1) * local_batch_size
        ].detach()
        losses: List[torch.Tensor] = []
        for _model, inp in ((ref_model, global_inp), (model, local_inp)):
            losses.append(_model(inp).sum())
            losses[-1].backward()
        self._reduce_1d_partial_grads(ref_model)
        dist.all_reduce(losses[1])  # partial -> replicated
        self.assertEqual(losses[0], losses[1])
        check_sharded_parity(self, ref_model, model)
        for _optim in (optim, ref_optim):
            _optim.step()
            _optim.zero_grad(set_to_none=(iter_idx % 2))
import collections import copy import functools import itertools import unittest from typing import Any, List, Optional, Type, Union import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable.fsdp import fully_shard from torch.nn.parallel.scatter_gather import _is_namedtuple from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, DoubleLinear, FSDPTest, FSDPTestMultiThread, MLP, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, ) class TestFullyShardAutograd(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_autograd.py
__init__
def __init__(self, dim: int):
    super().__init__()
    self.lin1 = nn.Linear(dim, dim)
    self.lin2 = nn.Linear(dim, dim)
    self.relu = nn.ReLU()
import collections import copy import functools import itertools import unittest from typing import Any, List, Optional, Type, Union import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable.fsdp import fully_shard from torch.nn.parallel.scatter_gather import _is_namedtuple from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, DoubleLinear, FSDPTest, FSDPTestMultiThread, MLP, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, ) class Module(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_autograd.py
forward
def forward(self, inp: Any):
    # Assume that the "0th" element of `inp` is a tensor, run some
    # forward computation on it, and pack it back into the same
    # data structure type as `inp`
    if isinstance(inp, list):
        return [self._forward(inp[0])]
    elif _is_namedtuple(inp):
        return type(inp)(*([self._forward(inp[0])] + list(inp[1:])))
    elif isinstance(inp, tuple):
        return (self._forward(inp[0]),)
    elif isinstance(inp, dict):
        return {"x": self._forward(inp["x"])}
    else:
        raise NotImplementedError(
            f"Unsupported input type {type(inp)}: {inp}"
        )
import collections import copy import functools import itertools import unittest from typing import Any, List, Optional, Type, Union import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable.fsdp import fully_shard from torch.nn.parallel.scatter_gather import _is_namedtuple from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, DoubleLinear, FSDPTest, FSDPTestMultiThread, MLP, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, ) class Module(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_extensions.py
_test_all_gather_extensions_train_parity
def _test_all_gather_extensions_train_parity(self, reshard_after_forward: bool):
    torch.manual_seed(42)
    model = self._init_two_tensor_mlp()
    ref_model = copy.deepcopy(model).cuda()
    ref_optim = torch.optim.Adam(ref_model.parameters(), lr=1e-2, foreach=True)
    fully_shard_fn = functools.partial(
        fully_shard, reshard_after_forward=reshard_after_forward
    )
    for mlp in model:
        fully_shard_fn(mlp)
    fully_shard_fn(model)
    optim = torch.optim.Adam(model.parameters(), lr=1e-2, foreach=True)
    check_sharded_parity(self, ref_model, model)
    torch.manual_seed(42 + self.rank + 1)
    inp = torch.randn((2, 8), device="cuda")
    for iter_idx in range(10):
        losses: List[torch.Tensor] = []
        for _model in (ref_model, model):
            losses.append(_model(inp).sum())
            losses[-1].backward()
            if _model is ref_model:
                for param_name, param in _model.named_parameters():
                    dist.all_reduce(param.grad)
                    param.grad.detach().div_(self.world_size)
        self.assertEqual(losses[0], losses[1])
        check_sharded_parity(self, ref_model, model)
        for _optim in (ref_optim, optim):
            _optim.step()
            _optim.zero_grad(set_to_none=(iter_idx % 2 == 0))
        check_sharded_parity(self, ref_model, model)
import contextlib import copy import functools import threading import unittest from typing import Any, List, Optional, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable.fsdp import fully_shard, MixedPrecisionPolicy from torch.distributed.device_mesh import DeviceMesh from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, FSDPTestMultiThread, MLP, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.two_tensor import TwoTensor
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_extensions.py
test_all_gather_extensions_end_to_end
def test_all_gather_extensions_end_to_end(self):
    with self._patch_two_tensor_fsdp_all_gather():
        self.run_subtests(
            {"reshard_after_forward": [True, False]},
            self._test_all_gather_extensions_end_to_end,
        )
import contextlib import copy import functools import threading import unittest from typing import Any, List, Optional, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable.fsdp import fully_shard, MixedPrecisionPolicy from torch.distributed.device_mesh import DeviceMesh from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, FSDPTestMultiThread, MLP, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.two_tensor import TwoTensor
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_extensions.py
test_all_gather_extensions_monkey_patch
def test_all_gather_extensions_monkey_patch(self):
    # Define a pre/post-all-gather pair that quantizes to bf16 for the
    # all-gather and de-quantizes back to the parameter dtype
    def fsdp_pre_all_gather(self) -> Tuple[Tuple[torch.Tensor, ...], Any]:
        return (self.to(torch.bfloat16),), None

    def fsdp_post_all_gather(
        self,
        all_gather_outputs: Tuple[torch.Tensor, ...],
        metadata: Any,
        param_dtype: torch.dtype,
        *,
        out: Optional[torch.Tensor] = None,
    ) -> Union[Tuple[torch.Tensor, Tuple[torch.Tensor, ...]], None]:
        (tensor,) = all_gather_outputs
        assert metadata is None, f"{metadata}"
        assert tensor.dtype == torch.bfloat16, f"{tensor.dtype}"
        if out is not None:
            out.copy_(tensor)
            return
        return tensor.to(param_dtype), (tensor,)

    with torch.device("meta"):
        model = self._init_two_tensor_mlp()
    for mlp in model:
        fully_shard(mlp)
    fully_shard(model)
    model.to_empty(device=self.device)
    for param in model.parameters():
        nn.init.trunc_normal_(param)
    # Monkey patch the pre/post-all-gather functions *after* `to_empty()`
    # since the local tensor objects change from materialization
    self.assertGreater(sum("weight" in n for n, _ in model.named_parameters()), 0)
    for param_name, param in model.named_parameters():
        if "weight" in param_name:
            local_param = param.to_local()
            # Monkey patch on the `torch.Tensor` to show that the extension
            # can work even without a subclass
            local_param.fsdp_pre_all_gather = fsdp_pre_all_gather
            local_param.fsdp_post_all_gather = fsdp_post_all_gather
    optim = torch.optim.Adam(model.parameters(), lr=1e-2, foreach=True)
    # Run a few iterations to check for errors
    torch.manual_seed(42 + self.rank + 1)
    inp = torch.randn((2, 8), device="cuda")
    for _ in range(3):
        model(inp).sum().backward()
        optim.step()
        optim.zero_grad()
import contextlib import copy import functools import threading import unittest from typing import Any, List, Optional, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable.fsdp import fully_shard, MixedPrecisionPolicy from torch.distributed.device_mesh import DeviceMesh from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, FSDPTestMultiThread, MLP, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.two_tensor import TwoTensor
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
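Two design points are worth drawing out of the monkey-patch test above, based on its own comments: the fsdp_pre_all_gather/fsdp_post_all_gather extension pair is attached directly to plain torch.Tensor objects rather than to a tensor subclass, showing the all-gather extension does not require subclassing; and the patching happens only after to_empty(), because materialization replaces the local tensor objects and would otherwise discard the patched attributes.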
torch
test/distributed/_composable/fsdp/test_fully_shard_frozen.py
test_train_mixed_requires_grad_per_group
def test_train_mixed_requires_grad_per_group(self):
    """
    Tests training parity with DDP when mixing frozen and non-frozen
    parameters in the same FSDP communication group. This checks that
    the reduce-scatters reduce the expected numel and that they are
    called via the custom autograd function backward (i.e. that they
    are not delayed until the end of backward).
    """
    self.run_subtests(
        {
            "reshard_after_forward": [False, True, 2],
            "use_activation_checkpointing": [False, True],
            "freeze_after_init": [False, True],
        },
        self._test_train_mixed_requires_grad_per_group,
    )
import copy import functools import itertools from typing import List, Union import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_param_group import ( RegisterPostBackwardFunction, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, MLP, patch_reduce_scatter, patch_register_post_backward_hook_backward, reduce_scatter_with_assert, ) from torch.testing._internal.common_utils import run_tests class TestFullyShardFrozen(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_frozen.py
assert_fn
def assert_fn(output: torch.Tensor):
    self.assertEqual(output.numel(), expected_numel)

# `expected_numel` and `orig_reduce_scatter` come from the enclosing test method
reduce_scatter = functools.partial(
    reduce_scatter_with_assert, self, orig_reduce_scatter, assert_fn
)
orig_backward = RegisterPostBackwardFunction.backward
backward_count = 0
import copy import functools import itertools from typing import List, Union import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_param_group import ( RegisterPostBackwardFunction, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, MLP, patch_reduce_scatter, patch_register_post_backward_hook_backward, reduce_scatter_with_assert, ) from torch.testing._internal.common_utils import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_frozen.py
backward_with_count
def backward_with_count(*args, **kwargs):
    nonlocal backward_count
    backward_count += 1
    return orig_backward(*args, **kwargs)

torch.manual_seed(42 + self.rank + 1)
device = torch.device("cuda")
with patch_reduce_scatter(
    reduce_scatter
), patch_register_post_backward_hook_backward(backward_with_count):
    for iter_idx in range(10):
        inp = torch.randn((8, lin_dim), device=device)
        losses: List[torch.Tensor] = []
        for _model, _optim in ((ref_model, ref_optim), (model, optim)):
            _optim.zero_grad(set_to_none=(iter_idx % 2 == 0))
            losses.append(_model(inp).sum())
            losses[-1].backward()
            _optim.step()
        check_sharded_parity(self, ref_model, model)
        self.assertEqual(losses[0], losses[1])
# Check that the post-backward hooks ran through the autograd
# backward, not the final callback (except possibly that of the
# first MLP, which does not have an input that requires grad)
self.assertTrue(backward_count >= num_mlps - 1)
import copy import functools import itertools from typing import List, Union import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_param_group import ( RegisterPostBackwardFunction, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, MLP, patch_reduce_scatter, patch_register_post_backward_hook_backward, reduce_scatter_with_assert, ) from torch.testing._internal.common_utils import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_frozen.py
test_train_mixed_requires_grad_across_groups
def test_train_mixed_requires_grad_across_groups(self):
    """
    Tests training parity with DDP when mixing frozen and non-frozen
    parameters across different FSDP communication groups, including
    possibly unfreezing parameters.
    """
    self.run_subtests(
        {
            "reshard_after_forward": [False, True, 2],
            "unfreeze_params": [False, True],
        },
        self._test_train_mixed_requires_grad_across_groups,
    )
import copy import functools import itertools from typing import List, Union import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_param_group import ( RegisterPostBackwardFunction, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, MLP, patch_reduce_scatter, patch_register_post_backward_hook_backward, reduce_scatter_with_assert, ) from torch.testing._internal.common_utils import run_tests class TestFullyShardFrozen(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_frozen.py
_set_requires_grad
def _set_requires_grad(seq: nn.Module, requires_grad: bool):
    for i in range(num_linears):
        # Interleave frozen -> non-frozen -> ... linears
        if i % 2 == 0:
            for param in seq[i % 2].parameters():
                param.requires_grad_(requires_grad)
import copy import functools import itertools from typing import List, Union import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_param_group import ( RegisterPostBackwardFunction, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, MLP, patch_reduce_scatter, patch_register_post_backward_hook_backward, reduce_scatter_with_assert, ) from torch.testing._internal.common_utils import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_compile.py
test_simple_mlp_fullgraph_backend_inductor
def test_simple_mlp_fullgraph_backend_inductor(self):
    self._test_traceable_fsdp(
        *self._create_simple_mlp_factory_fns(), "inductor", fullgraph=True
    )
import contextlib import copy import functools import unittest from unittest import mock import torch import torch._dynamo.testing import torch.distributed._composable.fsdp._fsdp_param import torch.nn.functional as F from torch import nn from torch._dynamo import compiled_autograd from torch._inductor import comms from torch._inductor.utils import is_fallback_op, run_and_get_code from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_common import TrainingState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import init_device_mesh from torch.testing import FileCheck from torch.testing._internal.common_distributed import at_least_x_gpu, skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, MLP from torch.testing._internal.common_utils import run_tests, skipIfRocm from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, ) from torch.utils._triton import has_triton class TestFullyShardCompile(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_compile.py
__init__
def __init__(self, hidden_dim):
    super().__init__()
    self.param1 = nn.Parameter(
        torch.zeros(hidden_dim, hidden_dim, dtype=torch.float, device="cuda")
    )
    self.param2 = nn.Parameter(
        torch.zeros(hidden_dim, dtype=torch.float, device="cuda")
    )
import contextlib import copy import functools import unittest from unittest import mock import torch import torch._dynamo.testing import torch.distributed._composable.fsdp._fsdp_param import torch.nn.functional as F from torch import nn from torch._dynamo import compiled_autograd from torch._inductor import comms from torch._inductor.utils import is_fallback_op, run_and_get_code from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_common import TrainingState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import init_device_mesh from torch.testing import FileCheck from torch.testing._internal.common_distributed import at_least_x_gpu, skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, MLP from torch.testing._internal.common_utils import run_tests, skipIfRocm from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, ) from torch.utils._triton import has_triton class TestSubmodule(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_compile.py
forward
def forward(self, x):
    # `fullgraph` is a free variable closed over from the enclosing test scope
    if not fullgraph:
        torch._dynamo.graph_break()
    ret = torch.matmul(x, self.param1)
    ret = ret * self.param2
    ret = torch.relu(ret)
    return ret
import contextlib import copy import functools import unittest from unittest import mock import torch import torch._dynamo.testing import torch.distributed._composable.fsdp._fsdp_param import torch.nn.functional as F from torch import nn from torch._dynamo import compiled_autograd from torch._inductor import comms from torch._inductor.utils import is_fallback_op, run_and_get_code from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_common import TrainingState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import init_device_mesh from torch.testing import FileCheck from torch.testing._internal.common_distributed import at_least_x_gpu, skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, MLP from torch.testing._internal.common_utils import run_tests, skipIfRocm from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, ) from torch.utils._triton import has_triton class TestSubmodule(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added