Dataset schema (column, feature type, value statistics):

  library          stringclasses   1 value
  test_file        stringclasses   785 values
  test_function    stringlengths   1 to 295
  before           stringlengths   0 to 448k
  after            stringlengths   0 to 487k
  context_before   stringclasses   947 values
  context_after    stringlengths   0 to 16.3k
  commit_before    stringclasses   1 value
  commit_after     stringclasses   1 value
  change_type      stringclasses   3 values
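For illustration, a minimal sketch of reading rows with this schema via the Hugging Face `datasets` library; the repository id below is a hypothetical placeholder, since the dataset's actual name does not appear in this dump.

```python
# Minimal sketch, assuming the Hugging Face `datasets` library is installed.
# "user/pytorch-test-changes" is a placeholder repo id, not the real dataset name.
from datasets import load_dataset

ds = load_dataset("user/pytorch-test-changes", split="train")  # hypothetical repo id
row = ds[0]
print(row["test_file"], row["test_function"], row["change_type"])
print(row["before"])  # test source before the change (empty for change_type == "added")
print(row["after"])   # test source after the change
```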
library: torch
test_file: test/test_mps.py
test_function: helper
before:
    def helper(val, shape):
        tensor = torch.zeros(shape, device='mps')
        tensor_mps = tensor.fill_(val)
        tensor_mps = torch.tanh(tensor_mps)
        tensor_0 = torch.zeros(shape, device='cpu')
        tensor_cpu = tensor_0.fill_(val)
        tensor_cpu = torch.tanh(tensor_cpu)
        self.assertEqual(tensor_mps, tensor_cpu)
    helper(0, [1024])
    helper(0.2, [2, 3])
after:
    def helper(val, shape, dtype):
        tensor = torch.zeros(shape, device='mps', dtype=dtype)
        tensor_mps = tensor.fill_(val)
        tensor_0 = torch.zeros(shape, device='cpu', dtype=dtype)
        tensor_cpu = tensor_0.fill_(val)
        self.assertEqual(tensor_mps, tensor_cpu)
    helper(0, [1024], torch.float32)
    helper(0.2, [2, 3], torch.float32)
    helper(0.2 + 0.5j, [2, 3], torch.complex64)
import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import pprint import copy import gc import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI, TEST_WITH_UBSAN, dtype_abbrs, skipIfSlowGradcheckEnv, TEST_WITH_ASAN, suppress_warnings) from torch.testing import make_tensor from torch.testing._comparison import TensorLikePair from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests, onlyMPS from torch.testing._internal.common_nn import NNTestCase import numpy as np import torch import torch.utils._pytree as pytree from itertools import product _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: modified
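As an aside, a minimal CPU-only sketch of the behaviour the updated test above newly covers, fill_ with a complex value on a complex64 tensor; the values are illustrative, not taken from the row.

```python
# CPU-only sketch; the MPS comparison is what the dataset row's test performs.
import torch

t = torch.zeros(2, 3, dtype=torch.complex64)
t.fill_(0.2 + 0.5j)
print(t)  # every element equals 0.2000+0.5000j
```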
library: torch
test_file: test/test_mps.py
test_function: test_isin_asserts
after:
    def test_isin_asserts(self):
        A = torch.randn(size=[1, 4], device='mps', dtype=torch.float32)
        B = torch.randn(size=[1, 4], device='mps', dtype=torch.float16)
        with self.assertRaisesRegex(RuntimeError, 'Expected elements.dtype()*'):
            out = torch.isin(A, B)
        C = torch.randn(size=[1, 4], device='mps', dtype=torch.float32)
        D = torch.randn(size=[1, 4], device='cpu', dtype=torch.float32)
        with self.assertRaisesRegex(RuntimeError, 'Expected elements.is_mps()*'):
            out = torch.isin(C, D)
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np class TestLogical(TestCaseMPS): from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: added
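For reference, a minimal CPU sketch of torch.isin, the op whose MPS-specific dtype and device checks the added test above asserts on; the inputs are illustrative only.

```python
# CPU sketch of torch.isin; the dataset row's test checks the MPS error paths.
import torch

elements = torch.tensor([1.0, 2.0, 3.0, 4.0])
test_elements = torch.tensor([2.0, 4.0])
print(torch.isin(elements, test_elements))  # tensor([False,  True, False,  True])
```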
library: torch
test_file: test/test_mps.py
test_function: test_nll_loss_out_of_bounds_ignore_index
before:
    def test_nll_loss_out_of_bounds_ignore_index(self):
        def _test_nll_loss_out_of_bounds_ignore_index(device):
            output = []
            x = torch.tensor([[0.3, 0.5, 0.2], [0.1, 0.7, 0.2], [0.4, 0.5, 0.1],
                              [0.3, 0.5, 0.2], [0.1, 0.7, 0.2], [0.4, 0.5, 0.1]], device=device)
            t = torch.tensor([0, 1, 255, 0, 1, 2], dtype=torch.int64, device=device)
            for reduction in ['mean', 'none']:
                output.append(F.nll_loss(x, t, ignore_index=255, reduction=reduction))
            return output
        output_cpu = _test_nll_loss_out_of_bounds_ignore_index(device='cpu')
        output_mps = _test_nll_loss_out_of_bounds_ignore_index(device='mps')
        for cpu, mps in zip(output_cpu, output_mps):
            self.assertEqual(cpu, mps.to('cpu'))
after:
    def test_nll_loss_out_of_bounds_ignore_index(self):
        def test_nll_loss_out_of_bounds_ignore_index_helper(device):
            output = []
            x = torch.tensor([[0.3, 0.5, 0.2], [0.1, 0.7, 0.2], [0.4, 0.5, 0.1],
                              [0.3, 0.5, 0.2], [0.1, 0.7, 0.2], [0.4, 0.5, 0.1]], device=device)
            t1 = torch.tensor([0, 1, 255, 0, 1, 2], dtype=torch.int64, device=device)
            t2 = torch.tensor([0, 1, 1, 0, -100, 2], dtype=torch.int64, device=device)
            for reduction in ['mean', 'none']:
                # out of bound ignore_index
                output.append(F.nll_loss(x, t1, ignore_index=255, reduction=reduction))
                # default ignore_index
                output.append(F.nll_loss(x, t2, reduction=reduction))
            return output
        output_cpu = test_nll_loss_out_of_bounds_ignore_index_helper(device='cpu')
        output_mps = test_nll_loss_out_of_bounds_ignore_index_helper(device='mps')
        for cpu, mps in zip(output_cpu, output_mps):
            self.assertEqual(cpu, mps)
import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import pprint import copy import gc import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI, TEST_WITH_UBSAN, dtype_abbrs, skipIfSlowGradcheckEnv, TEST_WITH_ASAN, suppress_warnings) from torch.testing import make_tensor from torch.testing._comparison import TensorLikePair from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests, onlyMPS from torch.testing._internal.common_nn import NNTestCase import numpy as np import torch import torch.utils._pytree as pytree from itertools import product _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch class TestNLLLoss(TestCaseMPS): import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np class TestNLLLoss(TestCaseMPS): from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: modified
library: torch
test_file: test/test_mps.py
test_function: test_nll_loss_out_of_bounds_ignore_index_helper
after:
    def test_nll_loss_out_of_bounds_ignore_index_helper(device):
        output = []
        x = torch.tensor([[0.3, 0.5, 0.2], [0.1, 0.7, 0.2], [0.4, 0.5, 0.1],
                          [0.3, 0.5, 0.2], [0.1, 0.7, 0.2], [0.4, 0.5, 0.1]], device=device)
        t1 = torch.tensor([0, 1, 255, 0, 1, 2], dtype=torch.int64, device=device)
        t2 = torch.tensor([0, 1, 1, 0, -100, 2], dtype=torch.int64, device=device)
        for reduction in ['mean', 'none']:
            # out of bound ignore_index
            output.append(F.nll_loss(x, t1, ignore_index=255, reduction=reduction))
            # default ignore_index
            output.append(F.nll_loss(x, t2, reduction=reduction))
        return output
    output_cpu = test_nll_loss_out_of_bounds_ignore_index_helper(device='cpu')
    output_mps = test_nll_loss_out_of_bounds_ignore_index_helper(device='mps')
    for cpu, mps in zip(output_cpu, output_mps):
        self.assertEqual(cpu, mps)
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: added
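For reference, a minimal CPU sketch of the nll_loss ignore_index behaviour that the two rows above exercise: targets equal to ignore_index (255 when passed explicitly, or the default -100) do not contribute to the loss. The tensors are illustrative, not copied from the dataset.

```python
# CPU sketch of F.nll_loss with the default ignore_index of -100.
import torch
import torch.nn.functional as F

x = torch.log_softmax(torch.randn(4, 3), dim=1)  # log-probabilities, shape (N, C)
t = torch.tensor([0, 2, -100, 1])                # third sample hits the default ignore_index
print(F.nll_loss(x, t))                          # mean over the three non-ignored samples
```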
torch
test/test_mps.py
helper
def helper(val, shape): tensor = torch.zeros(shape, device='mps') tensor_mps = tensor.fill_(val) tensor_mps = torch.tanh(tensor_mps) tensor_0 = torch.zeros(shape, device='cpu') tensor_cpu = tensor_0.fill_(val) tensor_cpu = torch.tanh(tensor_cpu) self.assertEqual(tensor_mps, tensor_cpu) helper(0, [1024]) helper(0.2, [2, 3])
def helper(val, shape, dtype): tensor = torch.zeros(shape, device='mps', dtype=dtype) tensor_mps = tensor.fill_(val) tensor_0 = torch.zeros(shape, device='cpu', dtype=dtype) tensor_cpu = tensor_0.fill_(val) self.assertEqual(tensor_mps, tensor_cpu) helper(0, [1024], torch.float32) helper(0.2, [2, 3], torch.float32) helper(0.2 + 0.5j, [2, 3], torch.complex64)
import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import pprint import copy import gc import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI, TEST_WITH_UBSAN, dtype_abbrs, skipIfSlowGradcheckEnv, TEST_WITH_ASAN, suppress_warnings) from torch.testing import make_tensor from torch.testing._comparison import TensorLikePair from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests, onlyMPS from torch.testing._internal.common_nn import NNTestCase import numpy as np import torch import torch.utils._pytree as pytree from itertools import product _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_mps.py
helper
def helper(val, shape):
    tensor = torch.zeros(shape, device='mps')
    tensor_mps = tensor.fill_(val)
    tensor_mps = torch.tanh(tensor_mps)
    tensor_0 = torch.zeros(shape, device='cpu')
    tensor_cpu = tensor_0.fill_(val)
    tensor_cpu = torch.tanh(tensor_cpu)
    self.assertEqual(tensor_mps, tensor_cpu)

helper(0, [1024])
helper(0.2, [2, 3])
def helper(val, shape, dtype):
    tensor = torch.zeros(shape, device='mps', dtype=dtype)
    tensor_mps = tensor.fill_(val)
    tensor_0 = torch.zeros(shape, device='cpu', dtype=dtype)
    tensor_cpu = tensor_0.fill_(val)
    self.assertEqual(tensor_mps, tensor_cpu)

helper(0, [1024], torch.float32)
helper(0.2, [2, 3], torch.float32)
helper(0.2 + 0.5j, [2, 3], torch.complex64)
import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import pprint import copy import gc import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI, TEST_WITH_UBSAN, dtype_abbrs, skipIfSlowGradcheckEnv, TEST_WITH_ASAN, suppress_warnings) from torch.testing import make_tensor from torch.testing._comparison import TensorLikePair from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests, onlyMPS from torch.testing._internal.common_nn import NNTestCase import numpy as np import torch import torch.utils._pytree as pytree from itertools import product _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_mps.py
test_topk
def test_topk(self):
    def helper(shape):
        cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=False)
        x = cpu_x.detach().clone().to('mps')
        for largest_val in [True, False]:
            if (type(shape) == tuple):
                for curr_dim in range(0, len(shape)):
                    dim_size = shape[curr_dim]
                    for k in range(1, dim_size + 1):
                        topk_values, topk_indices = torch.topk(x, k, dim=curr_dim, largest=largest_val)
                        topk_values_cpu, topk_indices_cpu = torch.topk(cpu_x, k, dim=curr_dim, largest=largest_val)
                        self.assertEqual(topk_values, topk_values_cpu)
                        self.assertEqual(topk_indices, topk_indices_cpu)
            else:
                for k in range(1, shape):
                    topk_values, topk_indices = torch.topk(x, k, dim=0, largest=largest_val)
                    topk_values_cpu, topk_indices_cpu = torch.topk(cpu_x, k, dim=0, largest=largest_val)
                    self.assertEqual(topk_values, topk_values_cpu)
                    self.assertEqual(topk_indices, topk_indices_cpu)

    helper(2)
    helper((5, 1))
    helper((1, 5))
    helper((5, 9, 7, 4))
    helper((50, 20, 7, 4))
def test_topk(self):
    largest_vals = [True, False]
    shapes = [
        # Zero Element Tensors
        0,
        (1, 0),
        (0, 1),
        (1, 0, 1),
        # Multiple Element Tensors
        1,
        2,
        (5, 1),
        (1, 5),
        (5, 9, 7, 4),
    ]

    for shape in shapes:
        for largest_val in largest_vals:
            with self.subTest(shape=shape, largest_val=largest_val):
                self._test_topk(shape, largest_val)
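The rewritten test above delegates the per-shape comparison to a _test_topk helper that is not part of this record. A minimal sketch of what such a helper could look like, assuming it keeps the MPS-vs-CPU comparison logic of the original version; the helper body below is an assumption, not taken from the source:

    def _test_topk(self, shape, largest_val):
        # Hypothetical helper: compares torch.topk on MPS against the CPU reference.
        cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=False)
        x = cpu_x.detach().clone().to('mps')
        if isinstance(shape, tuple):
            for curr_dim, dim_size in enumerate(shape):
                for k in range(1, dim_size + 1):
                    topk_values, topk_indices = torch.topk(x, k, dim=curr_dim, largest=largest_val)
                    topk_values_cpu, topk_indices_cpu = torch.topk(cpu_x, k, dim=curr_dim, largest=largest_val)
                    self.assertEqual(topk_values, topk_values_cpu)
                    self.assertEqual(topk_indices, topk_indices_cpu)
        else:
            for k in range(1, shape):
                topk_values, topk_indices = torch.topk(x, k, dim=0, largest=largest_val)
                topk_values_cpu, topk_indices_cpu = torch.topk(cpu_x, k, dim=0, largest=largest_val)
                self.assertEqual(topk_values, topk_values_cpu)
                self.assertEqual(topk_indices, topk_indices_cpu)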
import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import pprint import copy import gc import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI, TEST_WITH_UBSAN, dtype_abbrs, skipIfSlowGradcheckEnv, TEST_WITH_ASAN, suppress_warnings) from torch.testing import make_tensor from torch.testing._comparison import TensorLikePair from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests, onlyMPS from torch.testing._internal.common_nn import NNTestCase import numpy as np import torch import torch.utils._pytree as pytree from itertools import product _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch class TestNLLLoss(TestCaseMPS): import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np class TestTopK(TestCase): from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_mps.py
_create_basic_net
def _create_basic_net(self):
    class Layer(nn.Module):
        def __init__(self):
            super().__init__()
            self.layer_dummy_param = Parameter(torch.empty(3, 5))
            self.register_buffer('layer_dummy_buf', torch.zeros(1, 3, 3, 7))

    class Net(nn.Module):
        def __init__(self):
            super().__init__()
            self.l1 = Layer()
            self.dummy_param = Parameter(torch.empty(3, 5))
            self.register_buffer('dummy_buf', torch.zeros(7, 3, 3, 1))

    l = Layer()
    n = Net()
    s = nn.Sequential(n, n)
    return l, n, s
def _create_basic_net(self):
    class Layer(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.layer_dummy_param = Parameter(torch.empty(3, 5))
            self.layer_dummy_buf = Buffer(torch.zeros(1, 3, 3, 7))

    class Net(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.l1 = Layer()
            self.dummy_param = Parameter(torch.empty(3, 5))
            self.dummy_buf = Buffer(torch.zeros(7, 3, 3, 1))

    l = Layer()
    n = Net()
    s = nn.Sequential(n, n)
    return l, n, s
import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import pprint import copy import gc import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI, TEST_WITH_UBSAN, dtype_abbrs, skipIfSlowGradcheckEnv, TEST_WITH_ASAN, suppress_warnings) from torch.testing import make_tensor from torch.testing._comparison import TensorLikePair from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests, onlyMPS from torch.testing._internal.common_nn import NNTestCase import numpy as np import torch import torch.utils._pytree as pytree from itertools import product _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch import numpy as np class TestNNMPS(NNTestCase): from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np class TestNNMPS(NNTestCase): from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_mps.py
convert_weight_to_int4pack
def convert_weight_to_int4pack(b):
    b_int32, b_scales_and_zeros = _group_quantize_tensor(
        b.to("cpu"), n_bit=4, q_group_size=q_group
    )
    b_int32 = b_int32.to("mps")
    b_scales_and_zeros = b_scales_and_zeros.to("mps")
    b_int4pack = torch._convert_weight_to_int4pack(
        b_int32, inner_k_tiles
    )
    return b_int4pack, b_scales_and_zeros
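The helper above closes over q_group and inner_k_tiles (and the companion record below also uses a_f32 and b_f32) from an enclosing int4 matmul test that is not included in this record. A minimal sketch of the assumed surrounding setup; every name not present in the snippet and every value is illustrative only:

    m, k, n = 32, 64, 48                        # problem sizes (illustrative)
    q_group = 32                                # group size passed to _group_quantize_tensor (assumed value)
    inner_k_tiles = 2                           # packing parameter for torch._convert_weight_to_int4pack (assumed value)
    a_f32 = torch.rand((m, k), device="mps")    # activations
    b_f32 = torch.rand((n, k), device="mps")    # weight matrix to be quantized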
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_mps.py
weight_int4pack_mm
def weight_int4pack_mm(a, b_int4pack, b_scales_and_zeros):
    return torch._weight_int4pack_mm(
        a, b_int4pack, q_group, b_scales_and_zeros
    )

b_int4pack, b_scales_and_zeros_f32 = convert_weight_to_int4pack(b_f32)

for dtype in [torch.float16, torch.float32] + ([torch.bfloat16] if product_version > 14.0 else []):
    a = a_f32.to(dtype=dtype)
    b = b_f32.to(dtype=dtype)
    b_scales_and_zeros = b_scales_and_zeros_f32.to(dtype=dtype)
    ref = torch.mm(a, b)
    res = weight_int4pack_mm(a, b_int4pack, b_scales_and_zeros)
    mean_err = ((res - ref).abs() / ref).mean()
    self.assertLess(mean_err, 0.05)
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_mps.py
test__int8_mm
def test__int8_mm(self, m, k, n):
    torch.manual_seed(1)
    a_f32 = torch.rand((m, k), device="mps")
    b_f32 = torch.rand((n, k), device="mps")

    def convert_weight_to_int8pack(b):
        b_int8pack, b_scales, _ = _dynamically_quantize_per_channel(
            b, -128, 127, torch.int8
        )
        return b_int8pack, b_scales

    def weight_int8pack_mm(a, b_int8pack, b_scales):
        return torch._weight_int8pack_mm(a, b_int8pack, b_scales)

    b_int8pack, b_scales_f32 = convert_weight_to_int8pack(b_f32)

    for dtype in [torch.float16, torch.float32] + ([torch.bfloat16] if product_version > 14.0 else []):
        a = a_f32.to(dtype=dtype)
        b = b_f32.to(dtype=dtype)
        b_scales = b_scales_f32.to(dtype=dtype)
        res = weight_int8pack_mm(a, b_int8pack, b_scales)
        ref = torch.mm(a, b.transpose(0, 1))
        mean_err = ((res - ref).abs() / ref).mean()
        self.assertLess(mean_err, 0.05)
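The test above takes m, k and n as arguments, which implies it is parametrized over problem sizes. A minimal sketch of one way such a parametrization could be written with the parametrize helper imported in the surrounding context; the decorator placement and the size values are assumptions, not taken from the source:

    @parametrize("m", [32, 64])
    @parametrize("k", [32, 64])
    @parametrize("n", [32, 64])
    def test__int8_mm(self, m, k, n):
        ...  # body as in the record above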
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning class TestLinalgMPS(TestCaseMPS): from torch.testing._internal.common_utils import random_hermitian_pd_matrix
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_mps.py
convert_weight_to_int8pack
def convert_weight_to_int8pack(b):
    b_int8pack, b_scales, _ = _dynamically_quantize_per_channel(
        b, -128, 127, torch.int8
    )
    return b_int8pack, b_scales
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_mps.py
weight_int8pack_mm
def weight_int8pack_mm(a, b_int8pack, b_scales):
    return torch._weight_int8pack_mm(a, b_int8pack, b_scales)

b_int8pack, b_scales_f32 = convert_weight_to_int8pack(b_f32)

for dtype in [torch.float16, torch.float32] + ([torch.bfloat16] if product_version > 14.0 else []):
    a = a_f32.to(dtype=dtype)
    b = b_f32.to(dtype=dtype)
    b_scales = b_scales_f32.to(dtype=dtype)
    res = weight_int8pack_mm(a, b_int8pack, b_scales)
    ref = torch.mm(a, b.transpose(0, 1))
    mean_err = ((res - ref).abs() / ref).mean()
    self.assertLess(mean_err, 0.05)
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_mps.py
_compare_tensors
def _compare_tensors(self, y, ref):
    denom = torch.maximum(ref.abs(), torch.tensor([1e-6], device=ref.device, dtype=ref.dtype))
    err = ((y - ref).abs() / denom).mean().item()
    self.assertLess(err, 0.01)
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix class TestSDPA(TestCaseMPS):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_mps.py
test_sdpa_no_mask_no_causal_fp32
def test_sdpa_no_mask_no_causal_fp32(self):
    self._test_sdpa_no_mask(False, torch.float32)
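The SDPA records here all delegate to a _test_sdpa_no_mask helper that is not included in this dump. A minimal sketch of what such a helper could look like, assuming it compares the MPS output of scaled dot-product attention against a CPU float32 reference using the _compare_tensors helper defined above; the shapes and the helper body are assumptions, not the suite's actual implementation:

    def _test_sdpa_no_mask(self, is_causal, dtype):
        # Hypothetical helper: run SDPA on MPS and compare against a CPU fp32 reference.
        torch.manual_seed(1729)
        q = torch.randn(1, 8, 32, 64, dtype=dtype, device="mps")
        k = torch.randn(1, 8, 32, 64, dtype=dtype, device="mps")
        v = torch.randn(1, 8, 32, 64, dtype=dtype, device="mps")
        y = F.scaled_dot_product_attention(q, k, v, is_causal=is_causal)
        ref = F.scaled_dot_product_attention(
            q.cpu().float(), k.cpu().float(), v.cpu().float(), is_causal=is_causal
        )
        self._compare_tensors(y.cpu().float(), ref)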
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix class TestSDPA(TestCaseMPS):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_mps.py
test_sdpa_no_mask_no_causal_fp16
def test_sdpa_no_mask_no_causal_fp16(self):
    self._test_sdpa_no_mask(False, torch.float16)
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix class TestSDPA(TestCaseMPS):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_mps.py
test_sdpa_no_mask_causal_fp32
def test_sdpa_no_mask_causal_fp32(self):
    self._test_sdpa_no_mask(True, torch.float32)
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix class TestSDPA(TestCaseMPS):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_mps.py
test_sdpa_no_mask_causal_fp16
def test_sdpa_no_mask_causal_fp16(self):
    self._test_sdpa_no_mask(True, torch.float16)
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix class TestSDPA(TestCaseMPS):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_mps.py
helper
def helper(val, shape):
    tensor = torch.zeros(shape, device='mps')
    tensor_mps = tensor.fill_(val)
    tensor_mps = torch.tanh(tensor_mps)
    tensor_0 = torch.zeros(shape, device='cpu')
    tensor_cpu = tensor_0.fill_(val)
    tensor_cpu = torch.tanh(tensor_cpu)
    self.assertEqual(tensor_mps, tensor_cpu)

helper(0, [1024])
helper(0.2, [2, 3])
def helper(val, shape, dtype):
    tensor = torch.zeros(shape, device='mps', dtype=dtype)
    tensor_mps = tensor.fill_(val)
    tensor_0 = torch.zeros(shape, device='cpu', dtype=dtype)
    tensor_cpu = tensor_0.fill_(val)
    self.assertEqual(tensor_mps, tensor_cpu)

helper(0, [1024], torch.float32)
helper(0.2, [2, 3], torch.float32)
helper(0.2 + 0.5j, [2, 3], torch.complex64)
import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import pprint import copy import gc import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI, TEST_WITH_UBSAN, dtype_abbrs, skipIfSlowGradcheckEnv, TEST_WITH_ASAN, suppress_warnings) from torch.testing import make_tensor from torch.testing._comparison import TensorLikePair from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests, onlyMPS from torch.testing._internal.common_nn import NNTestCase import numpy as np import torch import torch.utils._pytree as pytree from itertools import product _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_mps.py
test_matrix_rank
def test_matrix_rank(self, device="mps", dtype=torch.float32): matrix_rank = torch.linalg.matrix_rank def run_test(shape0, shape1, batch): a = torch.randn(*batch, shape0, shape1, dtype=dtype, device=device) rank_a = matrix_rank(a) self.assertEqual(rank_a, matrix_rank(a.mH)) aaH = torch.matmul(a, a.mH) rank_aaH = matrix_rank(aaH) rank_aaH_hermitian = matrix_rank(aaH, hermitian=True) self.assertEqual(rank_aaH, rank_aaH_hermitian) aHa = torch.matmul(a.mH, a) self.assertEqual(matrix_rank(aHa), matrix_rank(aHa, hermitian=True)) # check against NumPy self.assertEqual(rank_a, np.linalg.matrix_rank(a.cpu().numpy())) self.assertEqual(matrix_rank(a, 0.01), np.linalg.matrix_rank(a.cpu().numpy(), 0.01)) self.assertEqual(rank_aaH, np.linalg.matrix_rank(aaH.cpu().numpy())) self.assertEqual(matrix_rank(aaH, 0.01), np.linalg.matrix_rank(aaH.cpu().numpy(), 0.01)) # hermitian flag for NumPy was added in 1.14.0 if np.lib.NumpyVersion(np.__version__) >= '1.14.0': self.assertEqual(rank_aaH_hermitian, np.linalg.matrix_rank(aaH.cpu().numpy(), hermitian=True)) self.assertEqual(matrix_rank(aaH, 0.01, True), np.linalg.matrix_rank(aaH.cpu().numpy(), 0.01, True)) # check out= variant out = torch.empty(a.shape[:-2], dtype=torch.int64, device=device) ans = matrix_rank(a, out=out) self.assertEqual(ans, out) self.assertEqual(ans, rank_a) shapes = (3, 13) batches = ((), (0, ), (4, ), (3, 5, )) for (shape0, shape1), batch in zip(itertools.product(shapes, reversed(shapes)), batches): # escape only when NotImplementedError of downstream function is raised # TODO: remove this once the required function is implemented try: run_test(shape0, shape1, batch) except NotImplementedError as e: with self.assertRaisesRegex( NotImplementedError, "The operator 'aten::_linalg_svd.U' is not currently implemented for the MPS device."): raise e
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning class TestLinalgMPS(TestCaseMPS): from torch.testing._internal.common_utils import random_hermitian_pd_matrix
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_mps.py
run_test_main
def run_test_main(A, hermitian):
    # Testing against definition for pseudo-inverses
    A_pinv = torch.linalg.pinv(A, hermitian=hermitian)
    np_A = A.cpu().numpy()
    np_A_pinv = A_pinv.cpu().numpy()
    if A.numel() > 0:
        self.assertEqual(A, np_A @ np_A_pinv @ np_A, atol=precision, rtol=precision)
        self.assertEqual(A_pinv, np_A_pinv @ np_A @ np_A_pinv, atol=precision, rtol=precision)
        self.assertEqual(np_A @ np_A_pinv, (np_A @ np_A_pinv).conj().swapaxes(-2, -1), atol=precision, rtol=precision)
        self.assertEqual(np_A_pinv @ np_A, (np_A_pinv @ np_A).conj().swapaxes(-2, -1), atol=precision, rtol=precision)
    else:
        self.assertEqual(A.shape, A_pinv.shape[:-2] + (A_pinv.shape[-1], A_pinv.shape[-2]))

    # Check out= variant
    out = torch.empty_like(A_pinv)
    ans = torch.linalg.pinv(A, hermitian=hermitian, out=out)
    self.assertEqual(ans, out)
    self.assertEqual(ans, A_pinv)
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_mps.py
run_test_numpy
def run_test_numpy(A, hermitian):
    # Check against NumPy output
    # Test float rcond, and specific value for each matrix
    rconds = [float(torch.rand(1)), ]
    # Test different types of rcond tensor
    for rcond_type in MPS_DTYPES:
        rconds.append(torch.rand(A.shape[:-2], dtype=torch.float32, device=device).to(rcond_type))
    # Test broadcasting of rcond
    if A.ndim > 2:
        rconds.append(torch.rand(A.shape[-3], device=device))
    for rcond in rconds:
        actual = torch.linalg.pinv(A, rcond=rcond, hermitian=hermitian)
        torch_rtol = torch.linalg.pinv(A, rtol=rcond, hermitian=hermitian)
        self.assertEqual(actual, torch_rtol, atol=precision, rtol=precision)
        numpy_rcond = rcond if isinstance(rcond, float) else rcond.cpu().numpy()
        expected = np.linalg.pinv(A.cpu().numpy(), rcond=numpy_rcond, hermitian=hermitian)
        self.assertEqual(actual, expected, atol=precision, rtol=precision)

for sizes in [(5, 5), (3, 5, 5), (3, 2, 5, 5),  # square matrices
              (3, 2), (5, 3, 2), (2, 5, 3, 2),  # fat matrices
              (2, 3), (5, 2, 3), (2, 5, 2, 3),  # thin matrices
              (0, 0), (0, 2), (2, 0), (3, 0, 0), (0, 3, 0), (0, 0, 3)]:  # zero numel matrices
    A = torch.randn(*sizes, dtype=dtype, device=device)
    hermitian = False
    run_test_main(A, hermitian)
    run_test_numpy(A, hermitian)

# Check hermitian = True
for sizes in [(5, 5), (3, 5, 5), (3, 2, 5, 5),  # square matrices
              (0, 0), (3, 0, 0), ]:  # zero numel square matrices
    A = random_hermitian_pd_matrix(sizes[-1], *sizes[:-2], dtype=dtype, device=device)
    hermitian = True
    # escape only when NotImplementedError of downstream function is raised
    # TODO: remove this once the required function is implemented
    try:
        run_test_main(A, hermitian)
    except NotImplementedError as e:
        with self.assertRaisesRegex(
                NotImplementedError,
                "The operator 'aten::_linalg_eigh.eigenvalues' is not currently implemented for the MPS device."):
            raise e
    try:
        run_test_numpy(A, hermitian)
    except NotImplementedError as e:
        with self.assertRaisesRegex(
                NotImplementedError,
                "The operator 'aten::_linalg_eigh.eigenvalues' is not currently implemented for the MPS device."):
            raise e
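The run_test_main and run_test_numpy helpers above reference device, dtype, precision and MPS_DTYPES from an enclosing pinv test that is not included in this record. A purely illustrative sketch of the assumed enclosing setup; the names come from the snippet, but the values below are assumptions:

    device, dtype = "mps", torch.float32
    precision = 1e-5                              # tolerance passed to assertEqual in the helpers (assumed value)
    MPS_DTYPES = [torch.float16, torch.float32]   # dtypes tried when building rcond tensors (assumed list)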
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_mps.py
test_conv3d_backward_collision
def test_conv3d_backward_collision(self):
    # Conv3D is only available from MacOS 13.2 onwards
    x = torch.rand(1, 1, 10, 10, 20, device="mps", requires_grad=True)
    m1 = nn.Conv3d(1, 1, 3, stride=2, padding=1).to("mps")
    m2 = nn.Conv3d(1, 1, 4, stride=2, padding=1).to("mps")
    y1, y2 = m1(x), m2(x)
    self.assertEqual(y1.shape, y2.shape)
    y1.sum().backward()
    # This used to crash with MPSNDArrayConvolutionA14.mm:4352: failed assertion
    y2.sum().backward()
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np class TestNNMPS(NNTestCase): from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_mps.py
test_sdpa_no_mask_causal_fp16_L7
def test_sdpa_no_mask_causal_fp16_L7(self):
    self._test_sdpa_no_mask(True, torch.float16, 7)
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix class TestSDPA(TestCaseMPS):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_mps.py
test_sdpa_no_mask_causal_fp16_L7_S17
def test_sdpa_no_mask_causal_fp16_L7_S17(self):
    self._test_sdpa_no_mask(True, torch.float16, 7, 17)
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix class TestSDPA(TestCaseMPS):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_mps.py
test_sdpa_no_mask_causal_fp16_L7_S17_NH23_HS121
def test_sdpa_no_mask_causal_fp16_L7_S17_NH23_HS121(self):
    self._test_sdpa_no_mask(True, torch.float16, 7, 17, 23, 121)
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix class TestSDPA(TestCaseMPS):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_mps.py
test_sdpa_no_mask_no_causal_fp32_grad
def test_sdpa_no_mask_no_causal_fp32_grad(self):
    self._test_sdpa_no_mask(False, torch.float32, requires_grad=True)

    with torch.no_grad():
        self._test_sdpa_no_mask(False, torch.float32, requires_grad=True)
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix class TestSDPA(TestCaseMPS):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_mps.py
_test_sdpa_mask
def _test_sdpa_mask(self, dtype: torch.dtype, L: int = 1, S: int = 72, NH: int = 32, HS: int = 128):
    torch.manual_seed(1729)
    causal_mask = torch.tril(torch.ones(S, S, dtype=torch.bool, device='mps'))
    with torch.nn.attention.sdpa_kernel([torch.nn.attention.SDPBackend.MATH]):
        i = 42
        q = torch.randn([1, NH, L, HS], dtype=dtype, device="mps")
        k = torch.randn([1, NH, S, HS], dtype=q.dtype, device="mps")
        v = torch.randn([1, NH, S, HS], dtype=q.dtype, device="mps")
        input_pos = torch.tensor([i], dtype=torch.int32, device='mps')
        mask = causal_mask[None, None, input_pos]

        y = F.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False)
        y_ref = F.scaled_dot_product_attention(q.cpu(), k.cpu(), v.cpu(), attn_mask=mask.cpu(), dropout_p=0.0, is_causal=False)

        self._compare_tensors(y.cpu(), y_ref)
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix class TestSDPA(TestCaseMPS):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_mps.py
test_sdpa_mask_fp32
def test_sdpa_mask_fp32(self):
    self._test_sdpa_mask(torch.float32)
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix class TestSDPA(TestCaseMPS):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_mps.py
test_sdpa_mask_fp16
def test_sdpa_mask_fp16(self):
    self._test_sdpa_mask(torch.float16)
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix class TestSDPA(TestCaseMPS):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_mps.py
test_sdpa_mask_fp16_L6
def test_sdpa_mask_fp16_L6(self):
    self._test_sdpa_mask(torch.float16, 6)
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix class TestSDPA(TestCaseMPS):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_multiprocessing.py
test_fill
def test_fill():
    x = torch.zeros(5, 5).to(device, dtype)
    q = ctx.Queue()
    e = ctx.Event()
    data = [x, x[:, 1]]
    q.put(data)

    p = ctx.Process(target=simple_fill, args=(q, e))
    p.daemon = True
    lc.check_pid(p.pid)
    p.start()

    total_waiting_time = 0
    waiting_time = 0.5
    is_set = False
    # Once the child process is done, it will set the event to notify the
    # parent accordingly
    while total_waiting_time <= MAX_WAITING_TIME_IN_SECONDS and not is_set:
        time.sleep(waiting_time)
        total_waiting_time += waiting_time
        is_set = e.is_set()

    self.assertTrue(is_set)
    self.assertTrue(data[0].eq(4).all())
    self.assertTrue(data[1].eq(4).all())

    p.join(100)
    self.assertFalse(p.is_alive())
def test_fill():
    x = torch.zeros(5, 5).to(device, dtype)
    q = ctx.Queue()
    e = ctx.Event()
    data = [x, x[:, 1]]
    q.put(data)

    p = ctx.Process(target=simple_fill, args=(q, e))
    p.daemon = True
    lc.check_pid(p.pid)
    p.start()

    total_waiting_time = 0
    waiting_time = 0.5
    is_set = False
    # Once the child process is done, it will set the event to notify the
    # parent accordingly
    while total_waiting_time <= MAX_WAITING_TIME_IN_SECONDS and not is_set:
        time.sleep(waiting_time)
        total_waiting_time += waiting_time
        is_set = e.is_set()

    self.assertTrue(is_set)
    if device != "meta":
        self.assertTrue(data[0].eq(4).all())
        self.assertTrue(data[1].eq(4).all())

    p.join(100)
    self.assertFalse(p.is_alive())
import contextlib import gc import os import sys import time import unittest import copy from sys import platform import torch import torch.cuda import torch.multiprocessing as mp import torch.utils.hooks from torch.nn import Parameter from torch.testing._internal.common_utils import (TestCase, run_tests, IS_WINDOWS, NO_MULTIPROCESSING_SPAWN, TEST_WITH_ASAN, load_tests, slowTest, TEST_WITH_TSAN, TEST_WITH_TORCHDYNAMO, TEST_WITH_ROCM, IS_MACOS) load_tests = load_tests TEST_REPEATS = 30 HAS_SHM_FILES = os.path.isdir('/dev/shm') MAX_WAITING_TIME_IN_SECONDS = 30 TEST_CUDA_IPC = torch.cuda.is_available() and \ sys.platform != 'darwin' and \ sys.platform != 'win32' and \ not TEST_WITH_ROCM # https://github.com/pytorch/pytorch/issues/90940 TEST_MULTIGPU = TEST_CUDA_IPC and torch.cuda.device_count() > 1
import contextlib import copy import gc import os import sys import time import unittest from sys import platform import torch import torch.cuda import torch.multiprocessing as mp import torch.utils.hooks from torch.nn import Parameter from torch.testing._internal.common_cuda import IS_JETSON from torch.testing._internal.common_utils import ( IS_MACOS, IS_WINDOWS, load_tests, NO_MULTIPROCESSING_SPAWN, run_tests, slowTest, TEST_WITH_ASAN, TEST_WITH_ROCM, TEST_WITH_TORCHDYNAMO, TEST_WITH_TSAN, TestCase, ) load_tests = load_tests TEST_REPEATS = 30 HAS_SHM_FILES = os.path.isdir("/dev/shm") MAX_WAITING_TIME_IN_SECONDS = 30 TEST_CUDA_IPC = ( torch.cuda.is_available() and sys.platform != "darwin" and sys.platform != "win32" and not IS_JETSON and not TEST_WITH_ROCM ) # https://github.com/pytorch/pytorch/issues/90940 TEST_MULTIGPU = TEST_CUDA_IPC and torch.cuda.device_count() > 1
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_multiprocessing.py
test_receive
def test_receive():
    q = ctx.Queue()
    e = ctx.Event()

    p = ctx.Process(target=send_tensor, args=(q, e, device, dtype))
    p.daemon = True
    lc.check_pid(p.pid)
    p.start()

    t1 = q.get()
    t2 = q.get()

    self.assertTrue(t1.eq(1).all())
    s1 = t1.storage()
    s2 = t2.storage()
    self.assertEqual(type(s1), type(s2))
    self.assertEqual(s1.data_ptr(), s1.data_ptr())
    self.assertEqual(s1, s2)

    # We need to delete this tensors to allow producer (child process)
    # collect them properly
    del t1, t2

    # Mark the event as done and join the process
    e.set()
    p.join(100)
    self.assertFalse(p.is_alive())

with leak_checker(self) as lc:
    for _ in range(repeat):
        test_fill()
        test_receive()
def test_receive():
    q = ctx.Queue()
    e = ctx.Event()

    p = ctx.Process(target=send_tensor, args=(q, e, device, dtype))
    p.daemon = True
    lc.check_pid(p.pid)
    p.start()

    t1 = q.get()
    t2 = q.get()

    if device == "meta":
        self.assertEqual(t1.size(), t2.size())
    else:
        self.assertTrue(t1.eq(1).all())
    s1 = t1.storage()
    s2 = t2.storage()
    self.assertEqual(type(s1), type(s2))
    self.assertEqual(s1.data_ptr(), s1.data_ptr())
    if device == "meta":
        self.assertEqual(s1.size(), s2.size())
    else:
        self.assertEqual(s1, s2)

    # We need to delete this tensors to allow producer (child process)
    # collect them properly
    del t1, t2

    # Mark the event as done and join the process
    e.set()
    p.join(100)
    self.assertFalse(p.is_alive())

with leak_checker(self) as lc:
    for _ in range(repeat):
        test_fill()
        test_receive()
import contextlib import gc import os import sys import time import unittest import copy from sys import platform import torch import torch.cuda import torch.multiprocessing as mp import torch.utils.hooks from torch.nn import Parameter from torch.testing._internal.common_utils import (TestCase, run_tests, IS_WINDOWS, NO_MULTIPROCESSING_SPAWN, TEST_WITH_ASAN, load_tests, slowTest, TEST_WITH_TSAN, TEST_WITH_TORCHDYNAMO, TEST_WITH_ROCM, IS_MACOS) load_tests = load_tests TEST_REPEATS = 30 HAS_SHM_FILES = os.path.isdir('/dev/shm') MAX_WAITING_TIME_IN_SECONDS = 30 TEST_CUDA_IPC = torch.cuda.is_available() and \ sys.platform != 'darwin' and \ sys.platform != 'win32' and \ not TEST_WITH_ROCM # https://github.com/pytorch/pytorch/issues/90940 TEST_MULTIGPU = TEST_CUDA_IPC and torch.cuda.device_count() > 1
import contextlib import copy import gc import os import sys import time import unittest from sys import platform import torch import torch.cuda import torch.multiprocessing as mp import torch.utils.hooks from torch.nn import Parameter from torch.testing._internal.common_cuda import IS_JETSON from torch.testing._internal.common_utils import ( IS_MACOS, IS_WINDOWS, load_tests, NO_MULTIPROCESSING_SPAWN, run_tests, slowTest, TEST_WITH_ASAN, TEST_WITH_ROCM, TEST_WITH_TORCHDYNAMO, TEST_WITH_TSAN, TestCase, ) load_tests = load_tests TEST_REPEATS = 30 HAS_SHM_FILES = os.path.isdir("/dev/shm") MAX_WAITING_TIME_IN_SECONDS = 30 TEST_CUDA_IPC = ( torch.cuda.is_available() and sys.platform != "darwin" and sys.platform != "win32" and not IS_JETSON and not TEST_WITH_ROCM ) # https://github.com/pytorch/pytorch/issues/90940 TEST_MULTIGPU = TEST_CUDA_IPC and torch.cuda.device_count() > 1
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_multiprocessing.py
test_empty_tensor_sharing_meta
def test_empty_tensor_sharing_meta(self):
    self._test_empty_tensor_sharing(torch.float32, torch.device("meta"))
    self._test_empty_tensor_sharing(torch.int64, torch.device("meta"))
import contextlib import copy import gc import os import sys import time import unittest from sys import platform import torch import torch.cuda import torch.multiprocessing as mp import torch.utils.hooks from torch.nn import Parameter from torch.testing._internal.common_cuda import IS_JETSON from torch.testing._internal.common_utils import ( IS_MACOS, IS_WINDOWS, load_tests, NO_MULTIPROCESSING_SPAWN, run_tests, slowTest, TEST_WITH_ASAN, TEST_WITH_ROCM, TEST_WITH_TORCHDYNAMO, TEST_WITH_TSAN, TestCase, ) load_tests = load_tests TEST_REPEATS = 30 HAS_SHM_FILES = os.path.isdir("/dev/shm") MAX_WAITING_TIME_IN_SECONDS = 30 TEST_CUDA_IPC = ( torch.cuda.is_available() and sys.platform != "darwin" and sys.platform != "win32" and not IS_JETSON and not TEST_WITH_ROCM ) # https://github.com/pytorch/pytorch/issues/90940 TEST_MULTIGPU = TEST_CUDA_IPC and torch.cuda.device_count() > 1 @unittest.skipIf( TEST_WITH_TSAN, "TSAN is not fork-safe since we're forking in a multi-threaded environment", ) class TestMultiprocessing(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_multiprocessing.py
test_tensor_sharing_meta
def test_tensor_sharing_meta(self):
    dtype = torch.float32
    device = torch.device("meta")
    q = mp.Queue()
    empty = torch.tensor([1], dtype=dtype, device=device)
    q.put(empty)
    out = q.get(timeout=1)
    self.assertEqual(out, empty)
import contextlib import copy import gc import os import sys import time import unittest from sys import platform import torch import torch.cuda import torch.multiprocessing as mp import torch.utils.hooks from torch.nn import Parameter from torch.testing._internal.common_cuda import IS_JETSON from torch.testing._internal.common_utils import ( IS_MACOS, IS_WINDOWS, load_tests, NO_MULTIPROCESSING_SPAWN, run_tests, slowTest, TEST_WITH_ASAN, TEST_WITH_ROCM, TEST_WITH_TORCHDYNAMO, TEST_WITH_TSAN, TestCase, ) load_tests = load_tests TEST_REPEATS = 30 HAS_SHM_FILES = os.path.isdir("/dev/shm") MAX_WAITING_TIME_IN_SECONDS = 30 TEST_CUDA_IPC = ( torch.cuda.is_available() and sys.platform != "darwin" and sys.platform != "win32" and not IS_JETSON and not TEST_WITH_ROCM ) # https://github.com/pytorch/pytorch/issues/90940 TEST_MULTIGPU = TEST_CUDA_IPC and torch.cuda.device_count() > 1 @unittest.skipIf( TEST_WITH_TSAN, "TSAN is not fork-safe since we're forking in a multi-threaded environment", ) class TestMultiprocessing(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_multiprocessing.py
test_meta_simple
def test_meta_simple(self):
    self._test_sharing(mp.get_context("spawn"), "meta", torch.float)
import contextlib import copy import gc import os import sys import time import unittest from sys import platform import torch import torch.cuda import torch.multiprocessing as mp import torch.utils.hooks from torch.nn import Parameter from torch.testing._internal.common_cuda import IS_JETSON from torch.testing._internal.common_utils import ( IS_MACOS, IS_WINDOWS, load_tests, NO_MULTIPROCESSING_SPAWN, run_tests, slowTest, TEST_WITH_ASAN, TEST_WITH_ROCM, TEST_WITH_TORCHDYNAMO, TEST_WITH_TSAN, TestCase, ) load_tests = load_tests TEST_REPEATS = 30 HAS_SHM_FILES = os.path.isdir("/dev/shm") MAX_WAITING_TIME_IN_SECONDS = 30 TEST_CUDA_IPC = ( torch.cuda.is_available() and sys.platform != "darwin" and sys.platform != "win32" and not IS_JETSON and not TEST_WITH_ROCM ) # https://github.com/pytorch/pytorch/issues/90940 TEST_MULTIGPU = TEST_CUDA_IPC and torch.cuda.device_count() > 1 @unittest.skipIf( TEST_WITH_TSAN, "TSAN is not fork-safe since we're forking in a multi-threaded environment", ) class TestMultiprocessing(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_multiprocessing_spawn.py
setUp
def setUp(self):
    super().setUp()
    self.orig_paralell_env_val = os.environ.get(mp.ENV_VAR_PARALLEL_START)
    os.environ[mp.ENV_VAR_PARALLEL_START] = "1"
import os import pickle import random import signal import sys import time import unittest import torch.multiprocessing as mp from torch.testing._internal.common_utils import ( IS_WINDOWS, NO_MULTIPROCESSING_SPAWN, run_tests, TestCase, ) @unittest.skipIf( IS_WINDOWS, "Fork is only available on Unix", ) class ParallelForkServerShouldWorkTest(TestCase, _TestMultiProcessing):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_multiprocessing_spawn.py
tearDown
def tearDown(self):
    super().tearDown()
    if self.orig_paralell_env_val is None:
        del os.environ[mp.ENV_VAR_PARALLEL_START]
    else:
        os.environ[mp.ENV_VAR_PARALLEL_START] = self.orig_paralell_env_val
import os import pickle import random import signal import sys import time import unittest import torch.multiprocessing as mp from torch.testing._internal.common_utils import ( IS_WINDOWS, NO_MULTIPROCESSING_SPAWN, run_tests, TestCase, ) @unittest.skipIf( IS_WINDOWS, "Fork is only available on Unix", ) class ParallelForkServerShouldWorkTest(TestCase, _TestMultiProcessing):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_multiprocessing_spawn.py
test_forkserver_perf
def test_forkserver_perf(self):
    start_method = 'forkserver'
    expensive = Expensive()
    nprocs = 4
    orig_paralell_env_val = os.environ.get(mp.ENV_VAR_PARALLEL_START)

    # test the non parallel case
    os.environ[mp.ENV_VAR_PARALLEL_START] = "0"
    start = time.perf_counter()
    mp.start_processes(expensive.my_call, nprocs=nprocs, start_method=start_method)
    elapsed = time.perf_counter() - start
    # the elapsed time should be at least {nprocs}x the sleep time
    self.assertGreaterEqual(elapsed, Expensive.SLEEP_SECS * nprocs)

    # test the parallel case
    os.environ[mp.ENV_VAR_PARALLEL_START] = "1"
    start = time.perf_counter()
    mp.start_processes(expensive.my_call, nprocs=nprocs, start_method=start_method)
    elapsed = time.perf_counter() - start
    # the elapsed time should be less than {nprocs}x the sleep time
    self.assertLess(elapsed, Expensive.SLEEP_SECS * nprocs)

    if orig_paralell_env_val is None:
        del os.environ[mp.ENV_VAR_PARALLEL_START]
    else:
        os.environ[mp.ENV_VAR_PARALLEL_START] = orig_paralell_env_val
import os import pickle import random import signal import sys import time import unittest import torch.multiprocessing as mp from torch.testing._internal.common_utils import ( IS_WINDOWS, NO_MULTIPROCESSING_SPAWN, run_tests, TestCase, ) @unittest.skipIf( IS_WINDOWS, "Fork is only available on Unix", ) class ParallelForkServerPerfTest(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_multiprocessing_spawn.py
__init__
def __init__(self):
    self.config: str = "*" * 1000000
import os import pickle import random import signal import sys import time import unittest import torch.multiprocessing as mp from torch.testing._internal.common_utils import ( IS_WINDOWS, NO_MULTIPROCESSING_SPAWN, run_tests, TestCase, ) class Expensive:
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_multiprocessing.py
test_set_thread_name
if __name__ == '__main__':
    run_tests()
def test_set_thread_name(self):
    name = "test name"
    mp._set_thread_name(name)
    self.assertEqual(mp._get_thread_name(), name)
import contextlib import copy import gc import os import sys import time import unittest from sys import platform import torch import torch.cuda import torch.multiprocessing as mp import torch.utils.hooks from torch.nn import Parameter from torch.testing._internal.common_cuda import IS_JETSON from torch.testing._internal.common_utils import ( IS_MACOS, IS_WINDOWS, load_tests, NO_MULTIPROCESSING_SPAWN, run_tests, slowTest, TEST_WITH_ASAN, TEST_WITH_ROCM, TEST_WITH_TORCHDYNAMO, TEST_WITH_TSAN, TestCase, ) load_tests = load_tests TEST_REPEATS = 30 HAS_SHM_FILES = os.path.isdir("/dev/shm") MAX_WAITING_TIME_IN_SECONDS = 30 TEST_CUDA_IPC = ( torch.cuda.is_available() and sys.platform != "darwin" and sys.platform != "win32" and not IS_JETSON and not TEST_WITH_ROCM ) # https://github.com/pytorch/pytorch/issues/90940 TEST_MULTIGPU = TEST_CUDA_IPC and torch.cuda.device_count() > 1 @unittest.skipIf( TEST_WITH_TSAN, "TSAN is not fork-safe since we're forking in a multi-threaded environment", ) class TestMultiprocessing(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_mps.py
is_view_of
def is_view_of(self, base, other):
    if (not other._is_view() or
            other is base or
            other._base is not base or
            base.device != other.device):
        return False
    # Note: only validates storage on native device types
    # because some accelerators, like XLA, do not expose storage
    if base.device.type == 'mps':
        if base.storage().data_ptr() != other.storage().data_ptr():
            return False

    return True

# Returns true if v1 and v2 are views of the same base
def is_view_of(self, base, other):
    if (not other._is_view() or
            other is base or
            other._base is not base or
            base.device != other.device):
        return False
    # Note: only validates storage on native device types
    # because some accelerators, like XLA, do not expose storage
    if base.device.type == 'mps':
        if base.untyped_storage().data_ptr() != other.untyped_storage().data_ptr():
            return False

    return True

# Returns true if v1 and v2 are views of the same base
import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import pprint import copy import gc import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI, TEST_WITH_UBSAN, dtype_abbrs, skipIfSlowGradcheckEnv, TEST_WITH_ASAN, suppress_warnings) from torch.testing import make_tensor from torch.testing._comparison import TensorLikePair from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests, onlyMPS from torch.testing._internal.common_nn import NNTestCase import numpy as np import torch import torch.utils._pytree as pytree from itertools import product _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning class TestViewOpsMPS(TestCaseMPS):
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix class TestViewOpsMPS(TestCaseMPS):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_namedtensor.py
check_repr
def check_repr(named_tensor):
    unnamed_tensor = named_tensor.rename(None)
    names_tag = 'names={}'.format(named_tensor.names)
    self.assertIn(names_tag, repr(named_tensor))

check_repr(torch.randn(128, 3, 64, 64, names=('N', 'C', 'H', 'W')))
def check_repr(named_tensor):
    unnamed_tensor = named_tensor.rename(None)
    names_tag = f'names={named_tensor.names}'
    self.assertIn(names_tag, repr(named_tensor))

check_repr(torch.randn(128, 3, 64, 64, names=('N', 'C', 'H', 'W')))
import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_NUMPY from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_device_type import get_all_device_types from collections import namedtuple, OrderedDict import itertools import functools import torch from torch import Tensor import torch.nn.functional as F from multiprocessing.reduction import ForkingPickler import pickle import io import sys import warnings Function = namedtuple('TestCase', ['name', 'lambd']) import numpy as np
import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_NUMPY from torch.testing._internal.common_utils import skipIfTorchDynamo from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_device_type import get_all_device_types from collections import namedtuple, OrderedDict import itertools import functools import torch from torch import Tensor import torch.nn.functional as F from multiprocessing.reduction import ForkingPickler import pickle import io import sys import warnings Function = namedtuple('TestCase', ['name', 'lambd']) import numpy as np
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_namedtensor.py
test_size
def test_size(self):
    t = torch.empty(2, 3, 5, names=('N', None, 'C'))
    self.assertEqual(t.size('N'), 2)
    self.assertEqual(t.size('C'), 5)
    with self.assertRaisesRegex(RuntimeError, 'Please look up dimensions by name*'):
        t.size(None)
    with self.assertRaisesRegex(RuntimeError, 'Name \'channels\' not found in '):
        t.size('channels')
    with self.assertRaisesRegex(RuntimeError, 'Name \'N\' not found in '):
        torch.empty(2, 3, 4).size('N')
def test_size(self):
    t = torch.empty(2, 3, 5, names=('N', None, 'C'))
    self.assertEqual(t.size('N'), 2)
    self.assertEqual(t.size('C'), 5)
    with self.assertRaisesRegex(RuntimeError, 'Name \'channels\' not found in '):
        t.size('channels')
    with self.assertRaisesRegex(RuntimeError, 'Name \'N\' not found in '):
        torch.empty(2, 3, 4).size('N')
import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_NUMPY from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_device_type import get_all_device_types from collections import namedtuple, OrderedDict import itertools import functools import torch from torch import Tensor import torch.nn.functional as F from multiprocessing.reduction import ForkingPickler import pickle import io import sys import warnings Function = namedtuple('TestCase', ['name', 'lambd']) class TestNamedTensor(TestCase): import numpy as np
import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_NUMPY from torch.testing._internal.common_utils import skipIfTorchDynamo from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_device_type import get_all_device_types from collections import namedtuple, OrderedDict import itertools import functools import torch from torch import Tensor import torch.nn.functional as F from multiprocessing.reduction import ForkingPickler import pickle import io import sys import warnings Function = namedtuple('TestCase', ['name', 'lambd']) class TestNamedTensor(TestCase): import numpy as np
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_namedtensor.py
test_stride
def test_stride(self):
    t = torch.empty(2, 3, 5, names=('N', None, 'C'))
    self.assertEqual(t.stride('N'), 3 * 5)
    self.assertEqual(t.stride('C'), 1)
    with self.assertRaisesRegex(RuntimeError, 'Please look up dimensions by name'):
        t.stride(None)
    with self.assertRaisesRegex(RuntimeError, 'Name \'channels\' not found in '):
        t.stride('channels')
    with self.assertRaisesRegex(RuntimeError, 'Name \'N\' not found in '):
        torch.empty(2, 3, 4).stride('N')
def test_stride(self):
    t = torch.empty(2, 3, 5, names=('N', None, 'C'))
    self.assertEqual(t.stride('N'), 3 * 5)
    self.assertEqual(t.stride('C'), 1)
    with self.assertRaisesRegex(RuntimeError, 'Name \'channels\' not found in '):
        t.stride('channels')
    with self.assertRaisesRegex(RuntimeError, 'Name \'N\' not found in '):
        torch.empty(2, 3, 4).stride('N')
import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_NUMPY from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_device_type import get_all_device_types from collections import namedtuple, OrderedDict import itertools import functools import torch from torch import Tensor import torch.nn.functional as F from multiprocessing.reduction import ForkingPickler import pickle import io import sys import warnings Function = namedtuple('TestCase', ['name', 'lambd']) class TestNamedTensor(TestCase): import numpy as np
import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_NUMPY from torch.testing._internal.common_utils import skipIfTorchDynamo from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_device_type import get_all_device_types from collections import namedtuple, OrderedDict import itertools import functools import torch from torch import Tensor import torch.nn.functional as F from multiprocessing.reduction import ForkingPickler import pickle import io import sys import warnings Function = namedtuple('TestCase', ['name', 'lambd']) class TestNamedTensor(TestCase): import numpy as np
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_mps.py
test_inplace_view_add
def test_inplace_view_add(self):
    # https://github.com/pytorch/pytorch/issues/96153
    t_mps = torch.ones((2, 6,), device='mps')[1].reshape(2, 3)
    t_cpu = torch.ones((2, 6,), device='cpu')[1].reshape(2, 3)
    t_mps = t_mps + 1
    t_cpu = t_cpu + 1
    self.assertEqual(t_mps, t_cpu)
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix class TestViewOpsMPS(TestCaseMPS):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_mps.py
test_squeeze_view
def test_squeeze_view(self, device="mps"):
    t = torch.ones(5, 1, 5, device=device)
    v = torch.squeeze(t)
    self.assertTrue(self.is_view_of(t, v))
    v[0, 1] = 0
    self.assertTrue(t is v._base)
def test_squeeze_view(self, device="mps"):
    t = torch.ones(5, 1, 5, device=device)
    v = torch.squeeze(t)
    self.assertTrue(self.is_view_of(t, v))
    v[0, 1] = 0
    self.assertIs(t, v._base)
import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import pprint import copy import gc import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI, TEST_WITH_UBSAN, dtype_abbrs, skipIfSlowGradcheckEnv, TEST_WITH_ASAN, suppress_warnings) from torch.testing import make_tensor from torch.testing._comparison import TensorLikePair from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests, onlyMPS from torch.testing._internal.common_nn import NNTestCase import numpy as np import torch import torch.utils._pytree as pytree from itertools import product _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning class TestViewOpsMPS(TestCaseMPS):
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix class TestViewOpsMPS(TestCaseMPS):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_mps.py
test_squeeze_inplace_view
def test_squeeze_inplace_view(self, device="mps"): t = torch.ones(5, 5, device=device) v = t.view_as(t) v = v.squeeze_() self.assertTrue(self.is_view_of(t, v)) v[0, 1] = 0 self.assertTrue(t is v._base)
def test_squeeze_inplace_view(self, device="mps"): t = torch.ones(5, 5, device=device) v = t.view_as(t) v = v.squeeze_() self.assertTrue(self.is_view_of(t, v)) v[0, 1] = 0 self.assertIs(t, v._base)
import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import pprint import copy import gc import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI, TEST_WITH_UBSAN, dtype_abbrs, skipIfSlowGradcheckEnv, TEST_WITH_ASAN, suppress_warnings) from torch.testing import make_tensor from torch.testing._comparison import TensorLikePair from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests, onlyMPS from torch.testing._internal.common_nn import NNTestCase import numpy as np import torch import torch.utils._pytree as pytree from itertools import product _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning class TestViewOpsMPS(TestCaseMPS):
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix class TestViewOpsMPS(TestCaseMPS):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_mps.py
test_contiguous_self
def test_contiguous_self(self, device="mps"): t = torch.ones(5, 5, device=device) s = t.contiguous() self.assertTrue(s is t)
def test_contiguous_self(self, device="mps"): t = torch.ones(5, 5, device=device) s = t.contiguous() self.assertIs(s, t)
import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import pprint import copy import gc import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI, TEST_WITH_UBSAN, dtype_abbrs, skipIfSlowGradcheckEnv, TEST_WITH_ASAN, suppress_warnings) from torch.testing import make_tensor from torch.testing._comparison import TensorLikePair from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests, onlyMPS from torch.testing._internal.common_nn import NNTestCase import numpy as np import torch import torch.utils._pytree as pytree from itertools import product _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning class TestViewOpsMPS(TestCaseMPS):
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix class TestViewOpsMPS(TestCaseMPS):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_mps.py
test_contiguous_nonview
def test_contiguous_nonview(self, device="mps"): t = torch.ones(5, 5, device=device) nv = t.t().contiguous() self.assertTrue(not self.is_view_of(t, nv)) nv[0, 0] = 0 self.assertNotEqual(t[0, 0], nv[0, 0])
def test_contiguous_nonview(self, device="mps"): t = torch.ones(5, 5, device=device) nv = t.t().contiguous() self.assertFalse(self.is_view_of(t, nv)) nv[0, 0] = 0 self.assertNotEqual(t[0, 0], nv[0, 0])
import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import pprint import copy import gc import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI, TEST_WITH_UBSAN, dtype_abbrs, skipIfSlowGradcheckEnv, TEST_WITH_ASAN, suppress_warnings) from torch.testing import make_tensor from torch.testing._comparison import TensorLikePair from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests, onlyMPS from torch.testing._internal.common_nn import NNTestCase import numpy as np import torch import torch.utils._pytree as pytree from itertools import product _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning class TestViewOpsMPS(TestCaseMPS):
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix class TestViewOpsMPS(TestCaseMPS):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_mps.py
test_reshape_nonview
def test_reshape_nonview(self, device="mps"): t = torch.ones(5, 5, device=device) nv = torch.reshape(t.t(), (25,)) self.assertTrue(not self.is_view_of(t, nv)) nv[6] = 0 self.assertNotEqual(t[1, 1], nv[6])
def test_reshape_nonview(self, device="mps"): t = torch.ones(5, 5, device=device) nv = torch.reshape(t.t(), (25,)) self.assertFalse(self.is_view_of(t, nv)) nv[6] = 0 self.assertNotEqual(t[1, 1], nv[6])
import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import pprint import copy import gc import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI, TEST_WITH_UBSAN, dtype_abbrs, skipIfSlowGradcheckEnv, TEST_WITH_ASAN, suppress_warnings) from torch.testing import make_tensor from torch.testing._comparison import TensorLikePair from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests, onlyMPS from torch.testing._internal.common_nn import NNTestCase import numpy as np import torch import torch.utils._pytree as pytree from itertools import product _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning class TestViewOpsMPS(TestCaseMPS):
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix class TestViewOpsMPS(TestCaseMPS):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_mps.py
assert_is_nonview
def assert_is_nonview(t, nv): idx_t = (0,) * t.ndim idx_nv = (0,) * nv.ndim self.assertTrue(not nv._is_view()) nv[idx_nv] = 0 self.assertNotEqual(t[idx_t], nv[idx_nv]) t = torch.ones(2, 3, 2, 3, device=device).transpose(2, 3) nv = t.flatten(1, 3) assert_is_nonview(t, nv) t = torch.ones(2, 2, device=device).T nv = t.flatten() assert_is_nonview(t, nv) # flatten returns the original object if start_dim=end_dim t = t = torch.ones(2, 2, device=device) nv = t.flatten(1, 1) self.assertTrue(t is nv)
def assert_is_nonview(t, nv): idx_t = (0,) * t.ndim idx_nv = (0,) * nv.ndim self.assertFalse(nv._is_view()) nv[idx_nv] = 0 self.assertNotEqual(t[idx_t], nv[idx_nv]) t = torch.ones(2, 3, 2, 3, device=device).transpose(2, 3) nv = t.flatten(1, 3) assert_is_nonview(t, nv) t = torch.ones(2, 2, device=device).T nv = t.flatten() assert_is_nonview(t, nv) # flatten returns the original object if start_dim=end_dim t = t = torch.ones(2, 2, device=device) nv = t.flatten(1, 1) self.assertIs(t, nv)
import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import pprint import copy import gc import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI, TEST_WITH_UBSAN, dtype_abbrs, skipIfSlowGradcheckEnv, TEST_WITH_ASAN, suppress_warnings) from torch.testing import make_tensor from torch.testing._comparison import TensorLikePair from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests, onlyMPS from torch.testing._internal.common_nn import NNTestCase import numpy as np import torch import torch.utils._pytree as pytree from itertools import product _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
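The records above (test_squeeze_view through assert_is_nonview) all follow one pattern: identity checks written as assertTrue(t is v) become assertIs(t, v), and negations written as assertTrue(not x) become assertFalse(x). Below is a minimal, self-contained sketch of why the dedicated asserts are preferred (they report both operands on failure instead of just "False is not true"); the ViewIdentityCase class name is hypothetical and only for illustration.

import unittest
import torch

class ViewIdentityCase(unittest.TestCase):
    def test_view_identity(self):
        t = torch.ones(5, 1, 5)
        v = torch.squeeze(t)
        # assertIs reports both operands when the identity check fails,
        # where assertTrue(t is v._base) only reports "False is not true".
        self.assertIs(t, v._base)
        # assertFalse(x) reads more directly than assertTrue(not x).
        self.assertFalse(v.t().contiguous()._is_view())

if __name__ == "__main__":
    unittest.main()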
torch
test/test_namedtensor.py
test_basic
def test_basic(op): a = torch.empty(2, 3, names=('N', 'C')) b = torch.empty(3, 2, names=('C', 'N')) c = torch.empty(3, names=('C',)) d = torch.empty(5, names=('W',)) self.assertEqual(op(a, a).names, ('N', 'C')) self.assertEqual(op(a, c).names, ('N', 'C')) with self.assertRaisesRegex(RuntimeError, "do not match"): op(a, d) with self.assertRaisesRegex(RuntimeError, "do not match"): op(a, b)
def test_basic(op): a = torch.empty(2, 3, names=('N', 'C')) b = torch.empty(3, 2, names=('C', 'N')) c = torch.empty(3, names=('C',)) d = torch.empty(5, names=('W',)) self.assertEqual(op(a, a).names, ('N', 'C')) self.assertEqual(op(a, c).names, ('N', 'C')) # TODO: dynamo will throw a slightly different # error message because it's adding fake tensors # `must match the size of` portion is the dynamo error with self.assertRaisesRegex(RuntimeError, "do not match|must match the size of"): op(a, d) with self.assertRaisesRegex(RuntimeError, "do not match|must match the size of"): op(a, b)
import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_NUMPY from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_device_type import get_all_device_types from collections import namedtuple, OrderedDict import itertools import functools import torch from torch import Tensor import torch.nn.functional as F from multiprocessing.reduction import ForkingPickler import pickle import io import sys import warnings Function = namedtuple('TestCase', ['name', 'lambd']) import numpy as np
import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_NUMPY from torch.testing._internal.common_utils import skipIfTorchDynamo from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_device_type import get_all_device_types from collections import namedtuple, OrderedDict import itertools import functools import torch from torch import Tensor import torch.nn.functional as F from multiprocessing.reduction import ForkingPickler import pickle import io import sys import warnings Function = namedtuple('TestCase', ['name', 'lambd']) import numpy as np
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
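The modified test_basic widens the expected-error regex so it matches both the eager-mode wording ("do not match") and the wording TorchDynamo produces when it traces with fake tensors ("must match the size of"). A small sketch of the same alternation pattern with assertRaisesRegex, assuming an ordinary TestCase; the two message fragments are taken directly from the record above, and the NamedDimsCase class is hypothetical.

import unittest
import torch

class NamedDimsCase(unittest.TestCase):
    def test_mismatched_dims_message(self):
        a = torch.empty(2, 3, names=('N', 'C'))
        d = torch.empty(5, names=('W',))
        # The alternation accepts either backend's wording for the same failure.
        with self.assertRaisesRegex(RuntimeError, "do not match|must match the size of"):
            a + d

if __name__ == "__main__":
    unittest.main()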
torch
test/test_namedtensor.py
test_unary_propagate_names_fns
def test_unary_propagate_names_fns(self): def _test(testcase, names=('N', 'D'), device='cpu'): sizes = [2] * len(names) tensor = torch.empty(sizes, names=names, device=device) try: out = testcase.lambd(tensor) except RuntimeError as err: # Get a better error message by catching the error and asserting. raise RuntimeError('{}: {}'.format(testcase.name, err)) from err self.assertEqual(out.names, tensor.names, msg=testcase.name) def fn(name, *args, **kwargs): return [Function(name, lambda t: getattr(torch, name)(t, *args, **kwargs))] def method(name, *args, **kwargs): return [Function(name, lambda t: getattr(t, name)(*args, **kwargs))] def out_function(name, *args, **kwargs): out_fn = getattr(torch, name) def fn(tensor): result = torch.empty([0], dtype=tensor.dtype, device=tensor.device) out_fn(tensor, *args, out=result, **kwargs) return result return [Function(name + '_out', fn)] def fn_method_and_inplace(name, *args, **kwargs): return ( method(name, *args, **kwargs) + method(name + '_', *args, **kwargs) + out_function(name, *args, **kwargs) ) # All of these operate on 2x2 tensors. tests = [ # unary pointwise fn_method_and_inplace('abs'), fn_method_and_inplace('acos'), fn_method_and_inplace('asin'), fn_method_and_inplace('atan'), fn_method_and_inplace('ceil'), fn_method_and_inplace('clamp', -1, 1), fn_method_and_inplace('clamp_min', -2), fn_method_and_inplace('clamp_max', 2), method('cauchy_'), method('clone'), method('contiguous'), fn_method_and_inplace('cos'), fn_method_and_inplace('cosh'), fn_method_and_inplace('digamma'), fn_method_and_inplace('erf'), fn_method_and_inplace('erfc'), fn_method_and_inplace('erfinv'), fn_method_and_inplace('exp'), fn_method_and_inplace('expm1'), method('exponential_'), fn_method_and_inplace('floor'), fn_method_and_inplace('frac'), method('geometric_', p=0.5), fn_method_and_inplace('lgamma'), fn_method_and_inplace('log'), fn_method_and_inplace('log10'), fn_method_and_inplace('log1p'), fn_method_and_inplace('log2'), method('log_normal_'), fn_method_and_inplace('neg'), method('normal_'), [Function('polygamma', lambda t: torch.polygamma(1, t))], method('polygamma_', 1), fn_method_and_inplace('reciprocal'), method('random_', 0, 1), method('random_', 1), method('random_'), method('relu_'), method('requires_grad_'), method('relu'), fn_method_and_inplace('round'), fn_method_and_inplace('rsqrt'), fn_method_and_inplace('sigmoid'), fn_method_and_inplace('sign'), fn_method_and_inplace('sin'), fn_method_and_inplace('sinh'), fn_method_and_inplace('sqrt'), fn_method_and_inplace('tan'), fn_method_and_inplace('tanh'), fn('threshold', 0, 1), fn('threshold_', 0, 1), out_function('threshold', 0, 1), fn_method_and_inplace('trunc'), method('uniform_'), method('zero_'), method('fill_', 1), method('fill_', torch.tensor(3.14)), # conversions method('to', dtype=torch.long), method('to', device='cpu'), method('to', torch.empty([])), method('bool'), method('byte'), method('char'), method('cpu'), method('double'), method('float'), method('long'), method('half'), method('int'), method('short'), method('type', dtype=torch.long), # cumsum and cumprod fn('cumsum', 0), fn('cumsum', 'D'), out_function('cumsum', 'D'), fn('cumprod', 0), fn('cumprod', 'D'), out_function('cumprod', 'D'), # views method('narrow', 0, 0, 1), # creation functions fn('empty_like'), fn('zeros_like'), fn('ones_like'), fn('full_like', 3.14), fn('rand_like'), fn('randn_like'), # bernoulli variants method('bernoulli_', 0.5), method('bernoulli_', torch.tensor(0.5)), method('softmax', dim=1), method('softmax', dim='D'), 
method('log_softmax', dim=1), method('log_softmax', dim='D'), [Function('F.dropout(inplace)', lambda t: F.dropout(t, p=0.5, inplace=True))], [Function('F.dropout(outplace)', lambda t: F.dropout(t, p=0.5, inplace=False))], ] tests = flatten(tests) for testcase, device in itertools.product(tests, get_all_device_types()): _test(testcase, device=device)
def test_unary_propagate_names_fns(self): def _test(testcase, names=('N', 'D'), device='cpu'): sizes = [2] * len(names) tensor = torch.empty(sizes, names=names, device=device) try: out = testcase.lambd(tensor) except RuntimeError as err: # Get a better error message by catching the error and asserting. raise RuntimeError(f'{testcase.name}: {err}') from err self.assertEqual(out.names, tensor.names, msg=testcase.name) def fn(name, *args, **kwargs): return [Function(name, lambda t: getattr(torch, name)(t, *args, **kwargs))] def method(name, *args, **kwargs): return [Function(name, lambda t: getattr(t, name)(*args, **kwargs))] def out_function(name, *args, **kwargs): out_fn = getattr(torch, name) def fn(tensor): result = torch.empty([0], dtype=tensor.dtype, device=tensor.device) out_fn(tensor, *args, out=result, **kwargs) return result return [Function(name + '_out', fn)] def fn_method_and_inplace(name, *args, **kwargs): return ( method(name, *args, **kwargs) + method(name + '_', *args, **kwargs) + out_function(name, *args, **kwargs) ) # All of these operate on 2x2 tensors. tests = [ # unary pointwise fn_method_and_inplace('abs'), fn_method_and_inplace('acos'), fn_method_and_inplace('asin'), fn_method_and_inplace('atan'), fn_method_and_inplace('ceil'), fn_method_and_inplace('clamp', -1, 1), fn_method_and_inplace('clamp_min', -2), fn_method_and_inplace('clamp_max', 2), method('cauchy_'), method('clone'), method('contiguous'), fn_method_and_inplace('cos'), fn_method_and_inplace('cosh'), fn_method_and_inplace('digamma'), fn_method_and_inplace('erf'), fn_method_and_inplace('erfc'), fn_method_and_inplace('erfinv'), fn_method_and_inplace('exp'), fn_method_and_inplace('expm1'), method('exponential_'), fn_method_and_inplace('floor'), fn_method_and_inplace('frac'), method('geometric_', p=0.5), fn_method_and_inplace('lgamma'), fn_method_and_inplace('log'), fn_method_and_inplace('log10'), fn_method_and_inplace('log1p'), fn_method_and_inplace('log2'), method('log_normal_'), fn_method_and_inplace('neg'), method('normal_'), [Function('polygamma', lambda t: torch.polygamma(1, t))], method('polygamma_', 1), fn_method_and_inplace('reciprocal'), method('random_', 0, 1), method('random_', 1), method('random_'), method('relu_'), method('requires_grad_'), method('relu'), fn_method_and_inplace('round'), fn_method_and_inplace('rsqrt'), fn_method_and_inplace('sigmoid'), fn_method_and_inplace('sign'), fn_method_and_inplace('sin'), fn_method_and_inplace('sinh'), fn_method_and_inplace('sqrt'), fn_method_and_inplace('tan'), fn_method_and_inplace('tanh'), fn('threshold', 0, 1), fn('threshold_', 0, 1), out_function('threshold', 0, 1), fn_method_and_inplace('trunc'), method('uniform_'), method('zero_'), method('fill_', 1), method('fill_', torch.tensor(3.14)), # conversions method('to', dtype=torch.long), method('to', device='cpu'), method('to', torch.empty([])), method('bool'), method('byte'), method('char'), method('cpu'), method('double'), method('float'), method('long'), method('half'), method('int'), method('short'), method('type', dtype=torch.long), # cumsum and cumprod fn('cumsum', 0), fn('cumsum', 'D'), out_function('cumsum', 'D'), fn('cumprod', 0), fn('cumprod', 'D'), out_function('cumprod', 'D'), # views method('narrow', 0, 0, 1), # creation functions fn('empty_like'), fn('zeros_like'), fn('ones_like'), fn('full_like', 3.14), fn('rand_like'), fn('randn_like'), # bernoulli variants method('bernoulli_', 0.5), method('bernoulli_', torch.tensor(0.5)), method('softmax', dim=1), method('softmax', dim='D'), 
method('log_softmax', dim=1), method('log_softmax', dim='D'), [Function('F.dropout(inplace)', lambda t: F.dropout(t, p=0.5, inplace=True))], [Function('F.dropout(outplace)', lambda t: F.dropout(t, p=0.5, inplace=False))], ] tests = flatten(tests) for testcase, device in itertools.product(tests, get_all_device_types()): _test(testcase, device=device)
import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_NUMPY from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_device_type import get_all_device_types from collections import namedtuple, OrderedDict import itertools import functools import torch from torch import Tensor import torch.nn.functional as F from multiprocessing.reduction import ForkingPickler import pickle import io import sys import warnings Function = namedtuple('TestCase', ['name', 'lambd']) class TestNamedTensor(TestCase): import numpy as np
import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_NUMPY from torch.testing._internal.common_utils import skipIfTorchDynamo from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_device_type import get_all_device_types from collections import namedtuple, OrderedDict import itertools import functools import torch from torch import Tensor import torch.nn.functional as F from multiprocessing.reduction import ForkingPickler import pickle import io import sys import warnings Function = namedtuple('TestCase', ['name', 'lambd']) class TestNamedTensor(TestCase): import numpy as np
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_mps.py
test_resize_mps_dtypes
shape = (2, 2) for dt in (torch.half, torch.bfloat16, torch.bool): x = torch.tensor([[1, 2], [3, 4], [5, 6]], dtype=dt, device=device) x.resize_(shape) self.assertEqual(shape, x.shape)
def test_resize_mps_dtypes(self, device="mps"): shape = (2, 2) for dt in MPS_DTYPES: x = torch.tensor([[1, 2], [3, 4], [5, 6]], dtype=dt, device=device) x.resize_(shape) self.assertEqual(shape, x.shape)
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix class TestViewOpsMPS(TestCaseMPS):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
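The added test_resize_mps_dtypes replaces the hard-coded dtype tuple with MPS_DTYPES, a module-level list of dtypes the MPS backend supports that is defined elsewhere in test_mps.py and not shown in this record. A hedged sketch of how such a list might be derived from get_all_dtypes; the specific exclusions below are an assumption for illustration, not the file's actual definition, and the loop only runs on a machine with an MPS device.

import torch
from torch.testing._internal.common_dtype import get_all_dtypes

# Illustrative assumption: drop dtypes the MPS backend does not handle,
# e.g. 64-bit floating point and complex; the real list may differ.
MPS_DTYPES = [d for d in get_all_dtypes() if d not in (torch.double, torch.cdouble, torch.bfloat16)]

for dt in MPS_DTYPES:
    x = torch.tensor([[1, 2], [3, 4], [5, 6]], dtype=dt, device="mps")
    x.resize_((2, 2))
    assert x.shape == (2, 2)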
torch
test/test_mps.py
test_resize_as_all_dtypes_and_devices
def test_resize_as_all_dtypes_and_devices(self, device="mps"): for dt in (torch.half, torch.bfloat16, torch.bool): x = torch.tensor([[1, 2], [3, 4], [5, 6]], dtype=dt, device=device) y = torch.tensor([[1, 2, 3], [4, 5, 6]], dtype=dt, device=device) x.resize_as_(y) self.assertEqual(y.shape, x.shape)
import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import pprint import copy import gc import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI, TEST_WITH_UBSAN, dtype_abbrs, skipIfSlowGradcheckEnv, TEST_WITH_ASAN, suppress_warnings) from torch.testing import make_tensor from torch.testing._comparison import TensorLikePair from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests, onlyMPS from torch.testing._internal.common_nn import NNTestCase import numpy as np import torch import torch.utils._pytree as pytree from itertools import product _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning class TestViewOpsMPS(TestCaseMPS):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_mps.py
test_resize_as_mps_dtypes
x = torch.tensor([[1, 2], [3, 4], [5, 6]], dtype=dt, device=device) y = torch.tensor([[1, 2, 3], [4, 5, 6]], dtype=dt, device=device) x.resize_as_(y) self.assertEqual(y.shape, x.shape)
def test_resize_as_mps_dtypes(self, device="mps"): for dt in MPS_DTYPES: x = torch.tensor([[1, 2], [3, 4], [5, 6]], dtype=dt, device=device) y = torch.tensor([[1, 2, 3], [4, 5, 6]], dtype=dt, device=device) x.resize_as_(y) self.assertEqual(y.shape, x.shape)
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix class TestViewOpsMPS(TestCaseMPS):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_mps.py
helper
def helper(val, shape): tensor = torch.zeros(shape, device='mps') tensor_mps = tensor.fill_(val) tensor_mps = torch.tanh(tensor_mps) tensor_0 = torch.zeros(shape, device='cpu') tensor_cpu = tensor_0.fill_(val) tensor_cpu = torch.tanh(tensor_cpu) self.assertEqual(tensor_mps, tensor_cpu) helper(0, [1024]) helper(0.2, [2, 3])
def helper(val, shape, dtype): tensor = torch.zeros(shape, device='mps', dtype=dtype) tensor_mps = tensor.fill_(val) tensor_0 = torch.zeros(shape, device='cpu', dtype=dtype) tensor_cpu = tensor_0.fill_(val) self.assertEqual(tensor_mps, tensor_cpu) helper(0, [1024], torch.float32) helper(0.2, [2, 3], torch.float32) helper(0.2 + 0.5j, [2, 3], torch.complex64)
import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import pprint import copy import gc import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI, TEST_WITH_UBSAN, dtype_abbrs, skipIfSlowGradcheckEnv, TEST_WITH_ASAN, suppress_warnings) from torch.testing import make_tensor from torch.testing._comparison import TensorLikePair from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests, onlyMPS from torch.testing._internal.common_nn import NNTestCase import numpy as np import torch import torch.utils._pytree as pytree from itertools import product _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
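The new helper drops the tanh round-trip and checks fill_ directly against a CPU reference, extending coverage to complex values (0.2 + 0.5j with torch.complex64). A minimal usage sketch of the same CPU-vs-MPS parity pattern outside the test harness; plain asserts stand in for self.assertEqual, and check_fill is a hypothetical name.

import torch

def check_fill(val, shape, dtype):
    # Fill identically shaped tensors on both devices and compare on CPU.
    mps = torch.zeros(shape, device="mps", dtype=dtype).fill_(val)
    cpu = torch.zeros(shape, device="cpu", dtype=dtype).fill_(val)
    assert torch.equal(mps.cpu(), cpu)

check_fill(0.2, [2, 3], torch.float32)
check_fill(0.2 + 0.5j, [2, 3], torch.complex64)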
torch
test/test_mps.py
helper
def helper(val, shape): tensor = torch.zeros(shape, device='mps') tensor_mps = tensor.fill_(val) tensor_mps = torch.tanh(tensor_mps) tensor_0 = torch.zeros(shape, device='cpu') tensor_cpu = tensor_0.fill_(val) tensor_cpu = torch.tanh(tensor_cpu) self.assertEqual(tensor_mps, tensor_cpu) helper(0, [1024]) helper(0.2, [2, 3])
def helper(val, shape, dtype): tensor = torch.zeros(shape, device='mps', dtype=dtype) tensor_mps = tensor.fill_(val) tensor_0 = torch.zeros(shape, device='cpu', dtype=dtype) tensor_cpu = tensor_0.fill_(val) self.assertEqual(tensor_mps, tensor_cpu) helper(0, [1024], torch.float32) helper(0.2, [2, 3], torch.float32) helper(0.2 + 0.5j, [2, 3], torch.complex64)
import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import pprint import copy import gc import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI, TEST_WITH_UBSAN, dtype_abbrs, skipIfSlowGradcheckEnv, TEST_WITH_ASAN, suppress_warnings) from torch.testing import make_tensor from torch.testing._comparison import TensorLikePair from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests, onlyMPS from torch.testing._internal.common_nn import NNTestCase import numpy as np import torch import torch.utils._pytree as pytree from itertools import product _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_mps.py
test_index_put_accumulate_non_contiguous
def test_index_put_accumulate_non_contiguous(self, device="mps"): t = torch.zeros((5, 2, 2)) t_dev = t.to(device) t1 = t_dev[:, 0, :] t2 = t[:, 0, :] self.assertTrue(not t1.is_contiguous()) self.assertTrue(not t2.is_contiguous()) indices = [torch.tensor([0, 1]), ] indices_dev = [i.to(device) for i in indices] value = torch.randn(2, 2) out_mps = t1.index_put_(indices_dev, value.to(device), accumulate=True) out_cpu = t2.index_put_(indices, value, accumulate=True) self.assertTrue(not t1.is_contiguous()) self.assertTrue(not t2.is_contiguous()) self.assertEqual(out_mps.cpu(), out_cpu)
def test_index_put_accumulate_non_contiguous(self, device="mps"): t = torch.zeros((5, 2, 2)) t_dev = t.to(device) t1 = t_dev[:, 0, :] t2 = t[:, 0, :] self.assertFalse(t1.is_contiguous()) self.assertFalse(t2.is_contiguous()) indices = [torch.tensor([0, 1]), ] indices_dev = [i.to(device) for i in indices] value = torch.randn(2, 2) out_mps = t1.index_put_(indices_dev, value.to(device), accumulate=True) out_cpu = t2.index_put_(indices, value, accumulate=True) self.assertFalse(t1.is_contiguous()) self.assertFalse(t2.is_contiguous()) self.assertEqual(out_mps.cpu(), out_cpu)
import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import pprint import copy import gc import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI, TEST_WITH_UBSAN, dtype_abbrs, skipIfSlowGradcheckEnv, TEST_WITH_ASAN, suppress_warnings) from torch.testing import make_tensor from torch.testing._comparison import TensorLikePair from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests, onlyMPS from torch.testing._internal.common_nn import NNTestCase import numpy as np import torch import torch.utils._pytree as pytree from itertools import product _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning class TestAdvancedIndexing(TestCaseMPS):
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix class TestAdvancedIndexing(TestCaseMPS):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
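The record above exercises index_put_ with accumulate=True on a non-contiguous view. A minimal sketch of that pattern, assuming an MPS-enabled build (it falls back to CPU otherwise); the shapes and index values are illustrative only:

import torch

device = "mps" if torch.backends.mps.is_available() else "cpu"

# A slice along the middle dimension yields a non-contiguous view of the base tensor.
base = torch.zeros((5, 2, 2), device=device)
view = base[:, 0, :]
assert not view.is_contiguous()

# With accumulate=True, repeated indices add into the destination instead of overwriting it.
indices = (torch.tensor([0, 0, 1], device=device),)
values = torch.ones(3, 2, device=device)
view.index_put_(indices, values, accumulate=True)
print(view.cpu())  # row 0 accumulates to 2.0, row 1 to 1.0, the remaining rows stay 0.0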
torch
test/test_mps.py
helper
def helper(val, shape): tensor = torch.zeros(shape, device='mps') tensor_mps = tensor.fill_(val) tensor_mps = torch.tanh(tensor_mps) tensor_0 = torch.zeros(shape, device='cpu') tensor_cpu = tensor_0.fill_(val) tensor_cpu = torch.tanh(tensor_cpu) self.assertEqual(tensor_mps, tensor_cpu) helper(0, [1024]) helper(0.2, [2, 3])
def helper(val, shape, dtype): tensor = torch.zeros(shape, device='mps', dtype=dtype) tensor_mps = tensor.fill_(val) tensor_0 = torch.zeros(shape, device='cpu', dtype=dtype) tensor_cpu = tensor_0.fill_(val) self.assertEqual(tensor_mps, tensor_cpu) helper(0, [1024], torch.float32) helper(0.2, [2, 3], torch.float32) helper(0.2 + 0.5j, [2, 3], torch.complex64)
import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import pprint import copy import gc import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI, TEST_WITH_UBSAN, dtype_abbrs, skipIfSlowGradcheckEnv, TEST_WITH_ASAN, suppress_warnings) from torch.testing import make_tensor from torch.testing._comparison import TensorLikePair from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests, onlyMPS from torch.testing._internal.common_nn import NNTestCase import numpy as np import torch import torch.utils._pytree as pytree from itertools import product _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
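The helper change above drops the tanh round-trip and compares fill_ directly against a CPU reference, adding a complex64 case. A short sketch of the same dtype-by-dtype parity check, assuming an MPS-enabled build (complex64 on MPS additionally needs a sufficiently recent macOS and PyTorch); torch.testing.assert_close stands in for the test harness's assertEqual:

import torch

device = "mps" if torch.backends.mps.is_available() else "cpu"

for val, dtype in [(0.0, torch.float32), (0.2, torch.float32), (0.2 + 0.5j, torch.complex64)]:
    filled = torch.zeros(2, 3, device=device, dtype=dtype).fill_(val)
    expected = torch.zeros(2, 3, dtype=dtype).fill_(val)  # CPU reference
    torch.testing.assert_close(filled.cpu(), expected)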
torch
test/test_mps.py
fn1
def fn1(x): x[x < 50] = 1.0 return x
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_mps.py
fn2
def fn2(x): x[0:50] = 1.0 return x scripted_fn1 = torch.jit.script(fn1) scripted_fn2 = torch.jit.script(fn2) data = torch.arange(100, device=device, dtype=torch.float) out = scripted_fn1(data.detach().clone()) ref = torch.tensor(np.concatenate((np.ones(50), np.arange(50, 100))), device=device, dtype=torch.float) self.assertEqual(out, ref) out = scripted_fn2(data.detach().clone()) self.assertEqual(out, ref)
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
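fn1 and fn2 above express the same fill two ways, by boolean mask and by slice, and the test runs both under torch.jit.script. A self-contained sketch of that equivalence, assuming an MPS-enabled build (CPU otherwise):

import torch

device = "mps" if torch.backends.mps.is_available() else "cpu"

@torch.jit.script
def mask_fill(x: torch.Tensor) -> torch.Tensor:
    x[x < 50] = 1.0   # boolean-mask assignment
    return x

@torch.jit.script
def slice_fill(x: torch.Tensor) -> torch.Tensor:
    x[0:50] = 1.0     # equivalent slice assignment
    return x

data = torch.arange(100, device=device, dtype=torch.float)
assert torch.equal(mask_fill(data.clone()), slice_fill(data.clone()))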
torch
test/test_mps.py
helper
def helper(val, shape): tensor = torch.zeros(shape, device='mps') tensor_mps = tensor.fill_(val) tensor_mps = torch.tanh(tensor_mps) tensor_0 = torch.zeros(shape, device='cpu') tensor_cpu = tensor_0.fill_(val) tensor_cpu = torch.tanh(tensor_cpu) self.assertEqual(tensor_mps, tensor_cpu) helper(0, [1024]) helper(0.2, [2, 3])
def helper(val, shape, dtype): tensor = torch.zeros(shape, device='mps', dtype=dtype) tensor_mps = tensor.fill_(val) tensor_0 = torch.zeros(shape, device='cpu', dtype=dtype) tensor_cpu = tensor_0.fill_(val) self.assertEqual(tensor_mps, tensor_cpu) helper(0, [1024], torch.float32) helper(0.2, [2, 3], torch.float32) helper(0.2 + 0.5j, [2, 3], torch.complex64)
import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import pprint import copy import gc import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI, TEST_WITH_UBSAN, dtype_abbrs, skipIfSlowGradcheckEnv, TEST_WITH_ASAN, suppress_warnings) from torch.testing import make_tensor from torch.testing._comparison import TensorLikePair from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests, onlyMPS from torch.testing._internal.common_nn import NNTestCase import numpy as np import torch import torch.utils._pytree as pytree from itertools import product _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_mps.py
helper
def helper(val, shape): tensor = torch.zeros(shape, device='mps') tensor_mps = tensor.fill_(val) tensor_mps = torch.tanh(tensor_mps) tensor_0 = torch.zeros(shape, device='cpu') tensor_cpu = tensor_0.fill_(val) tensor_cpu = torch.tanh(tensor_cpu) self.assertEqual(tensor_mps, tensor_cpu) helper(0, [1024]) helper(0.2, [2, 3])
def helper(val, shape, dtype): tensor = torch.zeros(shape, device='mps', dtype=dtype) tensor_mps = tensor.fill_(val) tensor_0 = torch.zeros(shape, device='cpu', dtype=dtype) tensor_cpu = tensor_0.fill_(val) self.assertEqual(tensor_mps, tensor_cpu) helper(0, [1024], torch.float32) helper(0.2, [2, 3], torch.float32) helper(0.2 + 0.5j, [2, 3], torch.complex64)
import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import pprint import copy import gc import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI, TEST_WITH_UBSAN, dtype_abbrs, skipIfSlowGradcheckEnv, TEST_WITH_ASAN, suppress_warnings) from torch.testing import make_tensor from torch.testing._comparison import TensorLikePair from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests, onlyMPS from torch.testing._internal.common_nn import NNTestCase import numpy as np import torch import torch.utils._pytree as pytree from itertools import product _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_mps.py
helper
def helper(val, shape): tensor = torch.zeros(shape, device='mps') tensor_mps = tensor.fill_(val) tensor_mps = torch.tanh(tensor_mps) tensor_0 = torch.zeros(shape, device='cpu') tensor_cpu = tensor_0.fill_(val) tensor_cpu = torch.tanh(tensor_cpu) self.assertEqual(tensor_mps, tensor_cpu) helper(0, [1024]) helper(0.2, [2, 3])
def helper(val, shape, dtype): tensor = torch.zeros(shape, device='mps', dtype=dtype) tensor_mps = tensor.fill_(val) tensor_0 = torch.zeros(shape, device='cpu', dtype=dtype) tensor_cpu = tensor_0.fill_(val) self.assertEqual(tensor_mps, tensor_cpu) helper(0, [1024], torch.float32) helper(0.2, [2, 3], torch.float32) helper(0.2 + 0.5j, [2, 3], torch.complex64)
import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import pprint import copy import gc import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI, TEST_WITH_UBSAN, dtype_abbrs, skipIfSlowGradcheckEnv, TEST_WITH_ASAN, suppress_warnings) from torch.testing import make_tensor from torch.testing._comparison import TensorLikePair from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests, onlyMPS from torch.testing._internal.common_nn import NNTestCase import numpy as np import torch import torch.utils._pytree as pytree from itertools import product _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_mps.py
test_conv3d_single_stride
def test_conv3d_single_stride(self): # Conv3d is only available from MacOS 13.2 onwards y_cpu = torch.randn(2, 2, 3, 6) y_gpu = y_cpu.to(device='mps') for stride in range(1, 4): conv_cpu = torch.nn.Conv3d(in_channels=2, out_channels=2, kernel_size=2, stride=stride) conv_gpu = copy.deepcopy(conv_cpu).to(device='mps') x_cpu = conv_cpu(y_cpu) x_gpu = conv_gpu(y_gpu) self.assertEqual(x_cpu, x_gpu.cpu(), rtol=1e-03, atol=1e-05)
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix class TestConvolutionMPS(TestCaseMPS):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
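test_conv3d_single_stride above compares Conv3d on MPS against CPU across strides; per its comment, Conv3d on MPS needs macOS 13.2 or newer. A trimmed-down sketch of one such comparison using the record's unbatched 4-D input layout; the loose tolerances are taken from the test:

import copy
import torch

if torch.backends.mps.is_available():
    x = torch.randn(2, 2, 3, 6)                     # unbatched (C, D, H, W) input
    conv_cpu = torch.nn.Conv3d(in_channels=2, out_channels=2, kernel_size=2, stride=1)
    conv_mps = copy.deepcopy(conv_cpu).to("mps")    # identical weights on both devices
    torch.testing.assert_close(conv_cpu(x), conv_mps(x.to("mps")).cpu(), rtol=1e-3, atol=1e-5)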
torch
test/test_mps.py
test_shape
def test_shape(N, C, IH, IW, H, W, mode, padding_mode, align_corners): for grid_dim_contig_order in [(0, 1, 2, 3), (0, 3, 1, 2), (3, 0, 1, 2), (0, 2, 1, 3)]: # grid_dim_contig_order specifies the dimension order that can # make grid to be contiguous. # i.e., grid.permute(grid_dim_contig_order) is contiguous. # e.g., with grid_dim_contig_order=[0, 3, 1, 2], grid should be # initialized with contiguous tensor of shape [N, 2, H, W] # and permuted to [N, H, W, 2] afterwards. grid_shape = [N, H, W, 2] grid_init_shape = [grid_shape[d] for d in grid_dim_contig_order] grid_fwd_permute = [None, None, None, None] for i, d in enumerate(grid_dim_contig_order): grid_fwd_permute[d] = i def get_grid(device='cpu', data=None): if data is not None: assert list(data.shape) == grid_shape data = data.permute(grid_dim_contig_order).to(device) else: data = torch.randn(grid_init_shape, device=device) grid = data.permute(grid_fwd_permute) assert grid.permute(grid_dim_contig_order).is_contiguous() return grid input_cpu = torch.randn(C, N, IH, IW).transpose(0, 1).requires_grad_(input_requires_grad) grid_cpu = get_grid().requires_grad_() out_cpu = F.grid_sample(input_cpu, grid_cpu, mode=mode, padding_mode=padding_mode, align_corners=align_corners) self.assertTrue(out_cpu.size() == torch.Size([N, C, H, W])) gradients = torch.randn_like(out_cpu) out_cpu.backward(gradients) # Compare against unvectorized CPU fallback # NOTE [ grid_sample CPU fallback ] # grid_sample uses AVX for 2d images, but that requires 32-bit indexing for # 32-bit floats. So we also have a fallback that is used only for float tensors # requiring 64-bit indexing. That requires too much memory to run on CI, so we # also export the fallback and test it here to ensure feature parity with # the vectorized version. 
input_fallback = input_cpu.float().detach_().requires_grad_() grid_fallback = grid_cpu.float().detach_().requires_grad_() out_fallback = torch._grid_sampler_2d_cpu_fallback( input_fallback, grid_fallback, F.GRID_SAMPLE_INTERPOLATION_MODES[mode], F.GRID_SAMPLE_PADDING_MODES[padding_mode], align_corners) self.assertEqual(out_fallback, out_cpu.float(), atol=1e-5, rtol=5e-5) out_fallback.backward(gradients.float()) if input_requires_grad: self.assertEqual(input_fallback.grad, input_cpu.grad.float(), atol=1e-4, rtol=5e-5) self.assertEqual(grid_fallback.grad, grid_cpu.grad.float(), atol=1e-4, rtol=5e-5) input_mps = input_cpu.detach().transpose(0, 1).to("mps").transpose(0, 1).requires_grad_(input_requires_grad) grid_mps = get_grid('mps', grid_cpu.detach()).requires_grad_() out_mps = F.grid_sample(input_mps, grid_mps, mode=mode, padding_mode=padding_mode, align_corners=align_corners) self.assertEqual(out_cpu, out_mps) out_mps.backward(gradients.to("mps")) if input_requires_grad: self.assertEqual(input_cpu.grad, input_mps.grad) self.assertEqual(grid_cpu.grad, grid_mps.grad, atol=5e-5, rtol=0) # check that zero-dimensional input strides don't error out base_input = torch.randn(N, C, 1, IW) input_cpu = base_input.expand_as(input_mps).requires_grad_(input_requires_grad) out_cpu = F.grid_sample(input_cpu, grid_cpu, mode=mode, padding_mode=padding_mode, align_corners=align_corners) input_mps = base_input.to("mps").expand_as(input_mps).requires_grad_(input_requires_grad) out_mps = F.grid_sample(input_mps, grid_mps, mode=mode, padding_mode=padding_mode, align_corners=align_corners) self.assertEqual(out_cpu, out_mps) # test same size output test_shape(N, C, H, W, H, W, mode, padding_mode, align_corners) # test larger output N = random.randint(2, 8) C = random.randint(2, 8) IH = random.randint(2, 8) IW = random.randint(2, 8) H = random.randint(IH + 1, 12) W = random.randint(IW + 1, 12) test_shape(N, C, IH, IW, H, W, mode, padding_mode, align_corners) # test smaller output N = random.randint(2, 8) C = random.randint(2, 8) IH = random.randint(2, 8) IW = random.randint(2, 8) H = random.randint(2, IH) W = random.randint(2, IW) test_shape(N, C, IH, IW, H, W, mode, padding_mode, align_corners) # test 1x1 inpput N = random.randint(2, 8) C = random.randint(2, 8) IH = 1 IW = 1 H = random.randint(2, 5) W = random.randint(2, 5) test_shape(N, C, IH, IW, H, W, mode, padding_mode, align_corners) # testing empty grid N = random.randint(2, 8) C = random.randint(2, 8) IH = random.randint(2, 8) IW = random.randint(2, 8) W = random.randint(3, IW + 2) test_shape(N, C, IH, IW, 0, W, mode, padding_mode, align_corners) # testing empty channel N = random.randint(2, 8) IH = random.randint(2, 8) IW = random.randint(2, 8) H = random.randint(3, IH + 2) W = random.randint(3, IW + 2) test_shape(N, 0, IH, IW, H, W, mode, padding_mode, align_corners) # testing empty batch C = random.randint(2, 8) IH = random.randint(2, 8) IW = random.randint(2, 8) H = random.randint(3, IH + 2) W = random.randint(3, IW + 2) test_shape(0, C, IH, IW, H, W, mode, padding_mode, align_corners)
def test_shape(N, C, IH, IW, H, W, mode, padding_mode, align_corners): for grid_dim_contig_order in [(0, 1, 2, 3), (0, 3, 1, 2), (3, 0, 1, 2), (0, 2, 1, 3)]: # grid_dim_contig_order specifies the dimension order that can # make grid to be contiguous. # i.e., grid.permute(grid_dim_contig_order) is contiguous. # e.g., with grid_dim_contig_order=[0, 3, 1, 2], grid should be # initialized with contiguous tensor of shape [N, 2, H, W] # and permuted to [N, H, W, 2] afterwards. grid_shape = [N, H, W, 2] grid_init_shape = [grid_shape[d] for d in grid_dim_contig_order] grid_fwd_permute = [None, None, None, None] for i, d in enumerate(grid_dim_contig_order): grid_fwd_permute[d] = i def get_grid(device='cpu', data=None): if data is not None: assert list(data.shape) == grid_shape data = data.permute(grid_dim_contig_order).to(device) else: data = torch.randn(grid_init_shape, device=device) grid = data.permute(grid_fwd_permute) assert grid.permute(grid_dim_contig_order).is_contiguous() return grid input_cpu = torch.randn(C, N, IH, IW).transpose(0, 1).requires_grad_(input_requires_grad) grid_cpu = get_grid().requires_grad_() out_cpu = F.grid_sample(input_cpu, grid_cpu, mode=mode, padding_mode=padding_mode, align_corners=align_corners) self.assertEqual(out_cpu.size(), torch.Size([N, C, H, W])) gradients = torch.randn_like(out_cpu) out_cpu.backward(gradients) # Compare against unvectorized CPU fallback # NOTE [ grid_sample CPU fallback ] # grid_sample uses AVX for 2d images, but that requires 32-bit indexing for # 32-bit floats. So we also have a fallback that is used only for float tensors # requiring 64-bit indexing. That requires too much memory to run on CI, so we # also export the fallback and test it here to ensure feature parity with # the vectorized version. 
input_fallback = input_cpu.float().detach_().requires_grad_() grid_fallback = grid_cpu.float().detach_().requires_grad_() out_fallback = torch._grid_sampler_2d_cpu_fallback( input_fallback, grid_fallback, F.GRID_SAMPLE_INTERPOLATION_MODES[mode], F.GRID_SAMPLE_PADDING_MODES[padding_mode], align_corners) self.assertEqual(out_fallback, out_cpu.float(), atol=1e-5, rtol=5e-5) out_fallback.backward(gradients.float()) if input_requires_grad: self.assertEqual(input_fallback.grad, input_cpu.grad.float(), atol=1e-4, rtol=5e-5) self.assertEqual(grid_fallback.grad, grid_cpu.grad.float(), atol=1e-4, rtol=5e-5) input_mps = input_cpu.detach().transpose(0, 1).to("mps").transpose(0, 1).requires_grad_(input_requires_grad) grid_mps = get_grid('mps', grid_cpu.detach()).requires_grad_() out_mps = F.grid_sample(input_mps, grid_mps, mode=mode, padding_mode=padding_mode, align_corners=align_corners) self.assertEqual(out_cpu, out_mps) out_mps.backward(gradients.to("mps")) if input_requires_grad: self.assertEqual(input_cpu.grad, input_mps.grad) self.assertEqual(grid_cpu.grad, grid_mps.grad, atol=5e-5, rtol=0) # check that zero-dimensional input strides don't error out base_input = torch.randn(N, C, 1, IW) input_cpu = base_input.expand_as(input_mps).requires_grad_(input_requires_grad) out_cpu = F.grid_sample(input_cpu, grid_cpu, mode=mode, padding_mode=padding_mode, align_corners=align_corners) input_mps = base_input.to("mps").expand_as(input_mps).requires_grad_(input_requires_grad) out_mps = F.grid_sample(input_mps, grid_mps, mode=mode, padding_mode=padding_mode, align_corners=align_corners) self.assertEqual(out_cpu, out_mps) # test same size output test_shape(N, C, H, W, H, W, mode, padding_mode, align_corners) # test larger output N = random.randint(2, 8) C = random.randint(2, 8) IH = random.randint(2, 8) IW = random.randint(2, 8) H = random.randint(IH + 1, 12) W = random.randint(IW + 1, 12) test_shape(N, C, IH, IW, H, W, mode, padding_mode, align_corners) # test smaller output N = random.randint(2, 8) C = random.randint(2, 8) IH = random.randint(2, 8) IW = random.randint(2, 8) H = random.randint(2, IH) W = random.randint(2, IW) test_shape(N, C, IH, IW, H, W, mode, padding_mode, align_corners) # test 1x1 inpput N = random.randint(2, 8) C = random.randint(2, 8) IH = 1 IW = 1 H = random.randint(2, 5) W = random.randint(2, 5) test_shape(N, C, IH, IW, H, W, mode, padding_mode, align_corners) # testing empty grid N = random.randint(2, 8) C = random.randint(2, 8) IH = random.randint(2, 8) IW = random.randint(2, 8) W = random.randint(3, IW + 2) test_shape(N, C, IH, IW, 0, W, mode, padding_mode, align_corners) # testing empty channel N = random.randint(2, 8) IH = random.randint(2, 8) IW = random.randint(2, 8) H = random.randint(3, IH + 2) W = random.randint(3, IW + 2) test_shape(N, 0, IH, IW, H, W, mode, padding_mode, align_corners) # testing empty batch C = random.randint(2, 8) IH = random.randint(2, 8) IW = random.randint(2, 8) H = random.randint(3, IH + 2) W = random.randint(3, IW + 2) test_shape(0, C, IH, IW, H, W, mode, padding_mode, align_corners)
import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import pprint import copy import gc import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI, TEST_WITH_UBSAN, dtype_abbrs, skipIfSlowGradcheckEnv, TEST_WITH_ASAN, suppress_warnings) from torch.testing import make_tensor from torch.testing._comparison import TensorLikePair from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests, onlyMPS from torch.testing._internal.common_nn import NNTestCase import numpy as np import torch import torch.utils._pytree as pytree from itertools import product _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
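The test_shape change above only replaces an assertTrue on the output size with assertEqual; the surrounding code checks F.grid_sample parity between CPU and MPS (and the unvectorized CPU fallback). A much smaller sketch of the forward-only parity idea, with illustrative shapes and tolerances chosen loosely here rather than taken from the test:

import torch
import torch.nn.functional as F

device = "mps" if torch.backends.mps.is_available() else "cpu"

inp = torch.randn(1, 3, 8, 8)
grid = torch.rand(1, 4, 4, 2) * 2 - 1   # sampling locations in [-1, 1], shape (N, H_out, W_out, 2)
out_cpu = F.grid_sample(inp, grid, mode="bilinear", padding_mode="zeros", align_corners=False)
out_dev = F.grid_sample(inp.to(device), grid.to(device),
                        mode="bilinear", padding_mode="zeros", align_corners=False)
torch.testing.assert_close(out_cpu, out_dev.cpu(), rtol=1e-4, atol=1e-5)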
torch
test/test_mps.py
helper
def helper(val, shape): tensor = torch.zeros(shape, device='mps') tensor_mps = tensor.fill_(val) tensor_mps = torch.tanh(tensor_mps) tensor_0 = torch.zeros(shape, device='cpu') tensor_cpu = tensor_0.fill_(val) tensor_cpu = torch.tanh(tensor_cpu) self.assertEqual(tensor_mps, tensor_cpu) helper(0, [1024]) helper(0.2, [2, 3])
def helper(val, shape, dtype): tensor = torch.zeros(shape, device='mps', dtype=dtype) tensor_mps = tensor.fill_(val) tensor_0 = torch.zeros(shape, device='cpu', dtype=dtype) tensor_cpu = tensor_0.fill_(val) self.assertEqual(tensor_mps, tensor_cpu) helper(0, [1024], torch.float32) helper(0.2, [2, 3], torch.float32) helper(0.2 + 0.5j, [2, 3], torch.complex64)
import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import pprint import copy import gc import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI, TEST_WITH_UBSAN, dtype_abbrs, skipIfSlowGradcheckEnv, TEST_WITH_ASAN, suppress_warnings) from torch.testing import make_tensor from torch.testing._comparison import TensorLikePair from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests, onlyMPS from torch.testing._internal.common_nn import NNTestCase import numpy as np import torch import torch.utils._pytree as pytree from itertools import product _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_mps.py
test_nonzero_non_diff
def test_nonzero_non_diff(self): device = "mps" x = torch.randn(10, requires_grad=True) nz = x.nonzero() self.assertFalse(nz.requires_grad)
def test_nonzero_non_diff(self): device = "mps" x = torch.randn(10, requires_grad=True, device=device) nz = x.nonzero() self.assertFalse(nz.requires_grad)
import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import pprint import copy import gc import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI, TEST_WITH_UBSAN, dtype_abbrs, skipIfSlowGradcheckEnv, TEST_WITH_ASAN, suppress_warnings) from torch.testing import make_tensor from torch.testing._comparison import TensorLikePair from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests, onlyMPS from torch.testing._internal.common_nn import NNTestCase import numpy as np import torch import torch.utils._pytree as pytree from itertools import product _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning class TestAdvancedIndexing(TestCaseMPS):
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix class TestAdvancedIndexing(TestCaseMPS):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_mps.py
test_nonzero_multi_threading
def test_nonzero_multi_threading(self): # Test that MPS doesn't crash if nonzero called concurrently # See https://github.com/pytorch/pytorch/issues/100285 x = torch.rand(3, 3, device="mps") t1 = threading.Thread(target=torch.nonzero, args=(x,)) t2 = threading.Thread(target=torch.nonzero, args=(x,)) t1.start() t2.start()
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix class TestAdvancedIndexing(TestCaseMPS):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
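The added test above launches two threads that call torch.nonzero on the same MPS tensor to guard against the concurrency crash referenced in its comment. A standalone sketch of the same pattern, with explicit joins added so the script waits for both calls:

import threading
import torch

device = "mps" if torch.backends.mps.is_available() else "cpu"

x = torch.rand(3, 3, device=device)
threads = [threading.Thread(target=torch.nonzero, args=(x,)) for _ in range(2)]
for t in threads:
    t.start()
for t in threads:
    t.join()   # unlike the test, wait for both nonzero calls to complete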
torch
test/test_mps.py
test_lstm_1
def test_lstm_1(self, device="mps", dtype=torch.float32): for layers in [1] if product_version < 13.0 else [1, 2, 5]: torch.random.manual_seed(42) rnn = nn.LSTM(7, 4, layers, device="cpu") input = torch.randn(2, 3, 7, device="cpu") hx = torch.randn(layers, 3, 4, device="cpu") cx = torch.randn(layers, 3, 4, device="cpu") cpu_output, (cpu_hn, cpu_cn) = rnn(input, (hx, cx)) rnn = rnn.to(device) input = input.to(device) hx = hx.to(device) cx = cx.to(device) output, (hn, cn) = rnn(input, (hx, cx)) self.assertEqual(cpu_output, output) self.assertEqual(cpu_hn, hn) self.assertEqual(cpu_cn, cn) # test batch_first rnn = nn.LSTM(7, 4, layers, device="cpu", batch_first=True) input = torch.randn(3, 2, 7, device="cpu") hx = torch.randn(layers, 3, 4, device="cpu") cx = torch.randn(layers, 3, 4, device="cpu") cpu_output, (cpu_hn, cpu_cn) = rnn(input, (hx, cx)) rnn = rnn.to(device) input = input.to(device) hx = hx.to(device) cx = cx.to(device) output, (hn, cn) = rnn(input, (hx, cx)) self.assertEqual(cpu_output, output) self.assertEqual(cpu_hn, hn) self.assertEqual(cpu_cn, cn)
import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import pprint import copy import gc import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI, TEST_WITH_UBSAN, dtype_abbrs, skipIfSlowGradcheckEnv, TEST_WITH_ASAN, suppress_warnings) from torch.testing import make_tensor from torch.testing._comparison import TensorLikePair from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests, onlyMPS from torch.testing._internal.common_nn import NNTestCase import numpy as np import torch import torch.utils._pytree as pytree from itertools import product _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning class TestRNNMPS(TestCaseMPS):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_mps.py
test_nextafter
def test_nextafter(self, device="mps"): for dtype in [torch.float16, torch.float32]: x = torch.tensor([1, -1, 0, 0, 2, -2], device=device, dtype=dtype) y = torch.tensor([2, -2, -1, 1, -3, 3], device=device, dtype=dtype) na = torch.nextafter(x, y) na_cpu = torch.nextafter(x.cpu(), y.cpu()) na_ge_x_mps = na.cpu() > x.cpu() # greater is broken on MPS, see https://github.com/pytorch/pytorch/issues/125051 na_ge_x_cpu = na_cpu > x.cpu() self.assertEqual(na_ge_x_mps, na_ge_x_cpu)
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix class TestAdvancedIndexing(TestCaseMPS):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_mps.py
test_lstm_backward
def test_lstm_backward(self, device="mps", dtype=torch.float32): for layers in [1] if product_version < 13.0 else [1, 2, 5]: lstm = nn.LSTM(2, 4, layers) # initialized globally for consistent parameters init lstm.train() def get_results(device, inp, hx, cx): rnn = lstm.to(device) inp, hx, cx = inp.to(device), hx.to(device), cx.to(device) output, _ = rnn(inp, (hx, cx)) f = output.sum() param_names, params = zip(*rnn.named_parameters()) param_grads = zip(param_names, torch.autograd.grad(f, params, retain_graph=True)) input_grad, hx_grad, cx_grad = torch.autograd.grad(f, [inp, hx, cx]) return output, param_grads, input_grad, hx_grad, cx_grad inp = torch.randn((5, 3, 2), requires_grad=True, dtype=dtype, device=device) hx = torch.randn((layers, 3, 4), requires_grad=True, dtype=dtype, device=device) cx = torch.randn((layers, 3, 4), requires_grad=True, dtype=dtype, device=device) cpu_output, cpu_weights_grad, cpu_input_grad, cpu_hx_grad, cpu_cx_grad = get_results("cpu", inp, hx, cx) mps_output, mps_weights_grad, mps_input_grad, mps_hx_grad, mps_cx_grad = get_results(device, inp, hx, cx) self.assertEqual(cpu_hx_grad, mps_hx_grad) self.assertEqual(cpu_cx_grad, mps_cx_grad) self.assertEqual(cpu_output, mps_output) self.assertEqual(cpu_input_grad, mps_input_grad) for (cpu_name, cpu_weight_grad), (mps_name, mps_weight_grad) in zip(cpu_weights_grad, mps_weights_grad): self.assertEqual(cpu_weight_grad, mps_weight_grad, f"mismatch in cpu:{cpu_name} vs mps:{mps_name}") # test batch_first backward lstm = nn.LSTM(2, 4, layers, batch_first=True) lstm.train() hx = torch.randn((layers, 5, 4), requires_grad=True, dtype=dtype, device=device) cx = torch.randn((layers, 5, 4), requires_grad=True, dtype=dtype, device=device) cpu_output, cpu_weights_grad, cpu_input_grad, cpu_hx_grad, cpu_cx_grad = get_results("cpu", inp, hx, cx) mps_output, mps_weights_grad, mps_input_grad, mps_hx_grad, mps_cx_grad = get_results(device, inp, hx, cx) self.assertEqual(cpu_hx_grad, mps_hx_grad) self.assertEqual(cpu_cx_grad, mps_cx_grad) self.assertEqual(cpu_output, mps_output) self.assertEqual(cpu_input_grad, mps_input_grad) for (cpu_name, cpu_weight_grad), (mps_name, mps_weight_grad) in zip(cpu_weights_grad, mps_weights_grad): self.assertEqual(cpu_weight_grad, mps_weight_grad, f"mismatch in cpu:{cpu_name} vs mps:{mps_name}")
def test_lstm_backward(self, device="mps", dtype=torch.float32): for num_layers in [1, 2, 5]: for test_options in self.LSTM_TEST_CASES: self._lstm_helper(num_layers=num_layers, dtype=dtype, device=device, backward=True, **test_options)
import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import pprint import copy import gc import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI, TEST_WITH_UBSAN, dtype_abbrs, skipIfSlowGradcheckEnv, TEST_WITH_ASAN, suppress_warnings) from torch.testing import make_tensor from torch.testing._comparison import TensorLikePair from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests, onlyMPS from torch.testing._internal.common_nn import NNTestCase import numpy as np import torch import torch.utils._pytree as pytree from itertools import product _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning class TestRNNMPS(TestCaseMPS):
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix class TestRNNMPS(TestCaseMPS):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_mps.py
get_results
def get_results(device, inp, hx, cx): rnn = lstm.to(device) inp, hx, cx = inp.to(device), hx.to(device), cx.to(device) output, _ = rnn(inp, (hx, cx)) f = output.sum() param_names, params = zip(*rnn.named_parameters()) param_grads = zip(param_names, torch.autograd.grad(f, params, retain_graph=True)) input_grad, hx_grad, cx_grad = torch.autograd.grad(f, [inp, hx, cx]) return output, param_grads, input_grad, hx_grad, cx_grad inp = torch.randn((5, 3, 2), requires_grad=True, dtype=dtype, device=device) hx = torch.randn((layers, 3, 4), requires_grad=True, dtype=dtype, device=device) cx = torch.randn((layers, 3, 4), requires_grad=True, dtype=dtype, device=device) cpu_output, cpu_weights_grad, cpu_input_grad, cpu_hx_grad, cpu_cx_grad = get_results("cpu", inp, hx, cx) mps_output, mps_weights_grad, mps_input_grad, mps_hx_grad, mps_cx_grad = get_results(device, inp, hx, cx) self.assertEqual(cpu_hx_grad, mps_hx_grad) self.assertEqual(cpu_cx_grad, mps_cx_grad) self.assertEqual(cpu_output, mps_output) self.assertEqual(cpu_input_grad, mps_input_grad) for (cpu_name, cpu_weight_grad), (mps_name, mps_weight_grad) in zip(cpu_weights_grad, mps_weights_grad): self.assertEqual(cpu_weight_grad, mps_weight_grad, f"mismatch in cpu:{cpu_name} vs mps:{mps_name}") # test batch_first backward lstm = nn.LSTM(2, 4, layers, batch_first=True) lstm.train() hx = torch.randn((layers, 5, 4), requires_grad=True, dtype=dtype, device=device) cx = torch.randn((layers, 5, 4), requires_grad=True, dtype=dtype, device=device) cpu_output, cpu_weights_grad, cpu_input_grad, cpu_hx_grad, cpu_cx_grad = get_results("cpu", inp, hx, cx) mps_output, mps_weights_grad, mps_input_grad, mps_hx_grad, mps_cx_grad = get_results(device, inp, hx, cx) self.assertEqual(cpu_hx_grad, mps_hx_grad) self.assertEqual(cpu_cx_grad, mps_cx_grad) self.assertEqual(cpu_output, mps_output) self.assertEqual(cpu_input_grad, mps_input_grad) for (cpu_name, cpu_weight_grad), (mps_name, mps_weight_grad) in zip(cpu_weights_grad, mps_weights_grad): self.assertEqual(cpu_weight_grad, mps_weight_grad, f"mismatch in cpu:{cpu_name} vs mps:{mps_name}")
class TestRNNMPS(TestCaseMPS): def _lstm_helper(self, num_layers, dtype, device, bidirectional=False, bias=True, batch_first=False, seq_len=3, batch_size=5, hidden_size=7, input_size=11, backward=False): rnn = nn.LSTM( input_size=input_size, hidden_size=hidden_size, num_layers=num_layers, bias=bias, bidirectional=bidirectional, batch_first=batch_first, device="cpu" ) bidirectional_mul = 2 if bidirectional else 1 if batch_first: input = torch.randn(batch_size, seq_len, input_size, device="cpu", dtype=dtype, requires_grad=backward) hx = torch.randn(num_layers * bidirectional_mul, batch_size, hidden_size, device="cpu", dtype=dtype, requires_grad=backward) cx = torch.randn(num_layers * bidirectional_mul, batch_size, hidden_size, device="cpu", dtype=dtype, requires_grad=backward) else: input = torch.randn(seq_len, batch_size, input_size, device="cpu", dtype=dtype, requires_grad=backward) hx = torch.randn(num_layers * bidirectional_mul, batch_size, hidden_size, device="cpu", dtype=dtype, requires_grad=backward) cx = torch.randn(num_layers * bidirectional_mul, batch_size, hidden_size, device="cpu", dtype=dtype, requires_grad=backward) cpu_output, (cpu_hn, cpu_cn) = rnn(input, (hx, cx)) rnn = rnn.to(device) input = input.to(device) hx = hx.to(device) cx = cx.to(device) output, (hn, cn) = rnn(input, (hx, cx)) self.assertEqual(cpu_output, output) self.assertEqual(cpu_hn, hn) self.assertEqual(cpu_cn, cn)
import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import pprint import copy import gc import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI, TEST_WITH_UBSAN, dtype_abbrs, skipIfSlowGradcheckEnv, TEST_WITH_ASAN, suppress_warnings) from torch.testing import make_tensor from torch.testing._comparison import TensorLikePair from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests, onlyMPS from torch.testing._internal.common_nn import NNTestCase import numpy as np import torch import torch.utils._pytree as pytree from itertools import product _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_mps.py
get_backward_results
cpu_output, cpu_weights_grad, cpu_input_grad, cpu_hx_grad, cpu_cx_grad = get_results("cpu", inp, hx, cx) mps_output, mps_weights_grad, mps_input_grad, mps_hx_grad, mps_cx_grad = get_results(device, inp, hx, cx) self.assertEqual(cpu_hx_grad, mps_hx_grad) self.assertEqual(cpu_cx_grad, mps_cx_grad) self.assertEqual(cpu_output, mps_output) self.assertEqual(cpu_input_grad, mps_input_grad) for (cpu_name, cpu_weight_grad), (mps_name, mps_weight_grad) in zip(cpu_weights_grad, mps_weights_grad): self.assertEqual(cpu_weight_grad, mps_weight_grad, f"mismatch in cpu:{cpu_name} vs mps:{mps_name}") # test batch_first backward lstm = nn.LSTM(2, 4, layers, batch_first=True) lstm.train() hx = torch.randn((layers, 5, 4), requires_grad=True, dtype=dtype, device=device) cx = torch.randn((layers, 5, 4), requires_grad=True, dtype=dtype, device=device) cpu_output, cpu_weights_grad, cpu_input_grad, cpu_hx_grad, cpu_cx_grad = get_results("cpu", inp, hx, cx) mps_output, mps_weights_grad, mps_input_grad, mps_hx_grad, mps_cx_grad = get_results(device, inp, hx, cx) self.assertEqual(cpu_hx_grad, mps_hx_grad) self.assertEqual(cpu_cx_grad, mps_cx_grad) self.assertEqual(cpu_output, mps_output) self.assertEqual(cpu_input_grad, mps_input_grad) for (cpu_name, cpu_weight_grad), (mps_name, mps_weight_grad) in zip(cpu_weights_grad, mps_weights_grad): self.assertEqual(cpu_weight_grad, mps_weight_grad, f"mismatch in cpu:{cpu_name} vs mps:{mps_name}")
def get_backward_results(rnn, device, inp, hx, cx, output_grad_presented=True, states_grad_presented=True): rnn = rnn.to(device) inp, hx, cx = inp.to(device), hx.to(device), cx.to(device) output, (hx_out, cx_out) = rnn(inp, (hx, cx)) assert output_grad_presented or states_grad_presented, "At least some outputs must be used" f = 0 if output_grad_presented: f = f + 3 * output.sum() if states_grad_presented: f = f + (hx_out * cx_out).sum() param_names, params = zip(*rnn.named_parameters()) param_grads = zip(param_names, torch.autograd.grad(f, params, retain_graph=True)) input_grad, hx_grad, cx_grad = torch.autograd.grad(f, [inp, hx, cx]) return output, param_grads, input_grad, hx_grad, cx_grad if backward: grad_cases = [ dict(output_grad_presented=True, states_grad_presented=True), dict(output_grad_presented=False, states_grad_presented=True), dict(output_grad_presented=True, states_grad_presented=False), ] for grad_case in grad_cases: cpu_output, cpu_weights_grad, cpu_input_grad, cpu_hx_grad, cpu_cx_grad =\ get_backward_results(rnn, "cpu", input, hx, cx, **grad_case) mps_output, mps_weights_grad, mps_input_grad, mps_hx_grad, mps_cx_grad =\ get_backward_results(rnn, device, input, hx, cx, **grad_case) self.assertEqual(cpu_hx_grad, mps_hx_grad) self.assertEqual(cpu_cx_grad, mps_cx_grad) self.assertEqual(cpu_output, mps_output) self.assertEqual(cpu_input_grad, mps_input_grad) for (cpu_name, cpu_weight_grad), (mps_name, mps_weight_grad) in zip(cpu_weights_grad, mps_weights_grad): self.assertEqual(cpu_weight_grad, mps_weight_grad, f"mismatch in cpu:{cpu_name} vs mps:{mps_name}, layers: {num_layers}")
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_mps.py
test_lstm_forward
def test_lstm_forward(self, device="mps", dtype=torch.float32): for num_layers in [1, 2, 5]: for test_options in self.LSTM_TEST_CASES: self._lstm_helper(num_layers=num_layers, dtype=dtype, device=device, **test_options)
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix class TestRNNMPS(TestCaseMPS):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_mps.py
helper
def helper(val, shape): tensor = torch.zeros(shape, device='mps') tensor_mps = tensor.fill_(val) tensor_mps = torch.tanh(tensor_mps) tensor_0 = torch.zeros(shape, device='cpu') tensor_cpu = tensor_0.fill_(val) tensor_cpu = torch.tanh(tensor_cpu) self.assertEqual(tensor_mps, tensor_cpu) helper(0, [1024]) helper(0.2, [2, 3])
def helper(val, shape, dtype): tensor = torch.zeros(shape, device='mps', dtype=dtype) tensor_mps = tensor.fill_(val) tensor_0 = torch.zeros(shape, device='cpu', dtype=dtype) tensor_cpu = tensor_0.fill_(val) self.assertEqual(tensor_mps, tensor_cpu) helper(0, [1024], torch.float32) helper(0.2, [2, 3], torch.float32) helper(0.2 + 0.5j, [2, 3], torch.complex64)
import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import pprint import copy import gc import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI, TEST_WITH_UBSAN, dtype_abbrs, skipIfSlowGradcheckEnv, TEST_WITH_ASAN, suppress_warnings) from torch.testing import make_tensor from torch.testing._comparison import TensorLikePair from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests, onlyMPS from torch.testing._internal.common_nn import NNTestCase import numpy as np import torch import torch.utils._pytree as pytree from itertools import product _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_mps.py
helper
def helper(val, shape): tensor = torch.zeros(shape, device='mps') tensor_mps = tensor.fill_(val) tensor_mps = torch.tanh(tensor_mps) tensor_0 = torch.zeros(shape, device='cpu') tensor_cpu = tensor_0.fill_(val) tensor_cpu = torch.tanh(tensor_cpu) self.assertEqual(tensor_mps, tensor_cpu) helper(0, [1024]) helper(0.2, [2, 3])
def helper(val, shape, dtype): tensor = torch.zeros(shape, device='mps', dtype=dtype) tensor_mps = tensor.fill_(val) tensor_0 = torch.zeros(shape, device='cpu', dtype=dtype) tensor_cpu = tensor_0.fill_(val) self.assertEqual(tensor_mps, tensor_cpu) helper(0, [1024], torch.float32) helper(0.2, [2, 3], torch.float32) helper(0.2 + 0.5j, [2, 3], torch.complex64)
import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import pprint import copy import gc import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI, TEST_WITH_UBSAN, dtype_abbrs, skipIfSlowGradcheckEnv, TEST_WITH_ASAN, suppress_warnings) from torch.testing import make_tensor from torch.testing._comparison import TensorLikePair from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests, onlyMPS from torch.testing._internal.common_nn import NNTestCase import numpy as np import torch import torch.utils._pytree as pytree from itertools import product _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_mps.py
helper
def helper(val, shape): tensor = torch.zeros(shape, device='mps') tensor_mps = tensor.fill_(val) tensor_mps = torch.tanh(tensor_mps) tensor_0 = torch.zeros(shape, device='cpu') tensor_cpu = tensor_0.fill_(val) tensor_cpu = torch.tanh(tensor_cpu) self.assertEqual(tensor_mps, tensor_cpu) helper(0, [1024]) helper(0.2, [2, 3])
def helper(val, shape, dtype): tensor = torch.zeros(shape, device='mps', dtype=dtype) tensor_mps = tensor.fill_(val) tensor_0 = torch.zeros(shape, device='cpu', dtype=dtype) tensor_cpu = tensor_0.fill_(val) self.assertEqual(tensor_mps, tensor_cpu) helper(0, [1024], torch.float32) helper(0.2, [2, 3], torch.float32) helper(0.2 + 0.5j, [2, 3], torch.complex64)
import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import pprint import copy import gc import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI, TEST_WITH_UBSAN, dtype_abbrs, skipIfSlowGradcheckEnv, TEST_WITH_ASAN, suppress_warnings) from torch.testing import make_tensor from torch.testing._comparison import TensorLikePair from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests, onlyMPS from torch.testing._internal.common_nn import NNTestCase import numpy as np import torch import torch.utils._pytree as pytree from itertools import product _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_mps.py
helper
def helper(val, shape): tensor = torch.zeros(shape, device='mps') tensor_mps = tensor.fill_(val) tensor_mps = torch.tanh(tensor_mps) tensor_0 = torch.zeros(shape, device='cpu') tensor_cpu = tensor_0.fill_(val) tensor_cpu = torch.tanh(tensor_cpu) self.assertEqual(tensor_mps, tensor_cpu) helper(0, [1024]) helper(0.2, [2, 3])
def helper(val, shape, dtype): tensor = torch.zeros(shape, device='mps', dtype=dtype) tensor_mps = tensor.fill_(val) tensor_0 = torch.zeros(shape, device='cpu', dtype=dtype) tensor_cpu = tensor_0.fill_(val) self.assertEqual(tensor_mps, tensor_cpu) helper(0, [1024], torch.float32) helper(0.2, [2, 3], torch.float32) helper(0.2 + 0.5j, [2, 3], torch.complex64)
import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import pprint import copy import gc import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI, TEST_WITH_UBSAN, dtype_abbrs, skipIfSlowGradcheckEnv, TEST_WITH_ASAN, suppress_warnings) from torch.testing import make_tensor from torch.testing._comparison import TensorLikePair from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests, onlyMPS from torch.testing._internal.common_nn import NNTestCase import numpy as np import torch import torch.utils._pytree as pytree from itertools import product _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_mps.py
helper
def helper(val, shape): tensor = torch.zeros(shape, device='mps') tensor_mps = tensor.fill_(val) tensor_mps = torch.tanh(tensor_mps) tensor_0 = torch.zeros(shape, device='cpu') tensor_cpu = tensor_0.fill_(val) tensor_cpu = torch.tanh(tensor_cpu) self.assertEqual(tensor_mps, tensor_cpu) helper(0, [1024]) helper(0.2, [2, 3])
def helper(val, shape, dtype): tensor = torch.zeros(shape, device='mps', dtype=dtype) tensor_mps = tensor.fill_(val) tensor_0 = torch.zeros(shape, device='cpu', dtype=dtype) tensor_cpu = tensor_0.fill_(val) self.assertEqual(tensor_mps, tensor_cpu) helper(0, [1024], torch.float32) helper(0.2, [2, 3], torch.float32) helper(0.2 + 0.5j, [2, 3], torch.complex64)
import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import pprint import copy import gc import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI, TEST_WITH_UBSAN, dtype_abbrs, skipIfSlowGradcheckEnv, TEST_WITH_ASAN, suppress_warnings) from torch.testing import make_tensor from torch.testing._comparison import TensorLikePair from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests, onlyMPS from torch.testing._internal.common_nn import NNTestCase import numpy as np import torch import torch.utils._pytree as pytree from itertools import product _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_mps.py
helper
def helper(val, shape): tensor = torch.zeros(shape, device='mps') tensor_mps = tensor.fill_(val) tensor_mps = torch.tanh(tensor_mps) tensor_0 = torch.zeros(shape, device='cpu') tensor_cpu = tensor_0.fill_(val) tensor_cpu = torch.tanh(tensor_cpu) self.assertEqual(tensor_mps, tensor_cpu) helper(0, [1024]) helper(0.2, [2, 3])
def helper(val, shape, dtype): tensor = torch.zeros(shape, device='mps', dtype=dtype) tensor_mps = tensor.fill_(val) tensor_0 = torch.zeros(shape, device='cpu', dtype=dtype) tensor_cpu = tensor_0.fill_(val) self.assertEqual(tensor_mps, tensor_cpu) helper(0, [1024], torch.float32) helper(0.2, [2, 3], torch.float32) helper(0.2 + 0.5j, [2, 3], torch.complex64)
import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import pprint import copy import gc import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI, TEST_WITH_UBSAN, dtype_abbrs, skipIfSlowGradcheckEnv, TEST_WITH_ASAN, suppress_warnings) from torch.testing import make_tensor from torch.testing._comparison import TensorLikePair from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests, onlyMPS from torch.testing._internal.common_nn import NNTestCase import numpy as np import torch import torch.utils._pytree as pytree from itertools import product _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_mps.py
helper
def helper(val, shape): tensor = torch.zeros(shape, device='mps') tensor_mps = tensor.fill_(val) tensor_mps = torch.tanh(tensor_mps) tensor_0 = torch.zeros(shape, device='cpu') tensor_cpu = tensor_0.fill_(val) tensor_cpu = torch.tanh(tensor_cpu) self.assertEqual(tensor_mps, tensor_cpu) helper(0, [1024]) helper(0.2, [2, 3])
def helper(val, shape, dtype): tensor = torch.zeros(shape, device='mps', dtype=dtype) tensor_mps = tensor.fill_(val) tensor_0 = torch.zeros(shape, device='cpu', dtype=dtype) tensor_cpu = tensor_0.fill_(val) self.assertEqual(tensor_mps, tensor_cpu) helper(0, [1024], torch.float32) helper(0.2, [2, 3], torch.float32) helper(0.2 + 0.5j, [2, 3], torch.complex64)
import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import pprint import copy import gc import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI, TEST_WITH_UBSAN, dtype_abbrs, skipIfSlowGradcheckEnv, TEST_WITH_ASAN, suppress_warnings) from torch.testing import make_tensor from torch.testing._comparison import TensorLikePair from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests, onlyMPS from torch.testing._internal.common_nn import NNTestCase import numpy as np import torch import torch.utils._pytree as pytree from itertools import product _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_mps.py
req_grad
def req_grad(t): return isinstance(t, torch.Tensor) and t.requires_grad diff_cpu_out = tuple(t for t in cpu_out if req_grad(t)) diff_mps_out = tuple(t for t in mps_out if req_grad(t)) diff_cpu_arg = tuple(t for t in pytree.tree_flatten((cpu_args, cpu_kwargs))[0] if req_grad(t)) diff_mps_arg = tuple(t for t in pytree.tree_flatten((mps_args, mps_kwargs))[0] if req_grad(t)) self.assertEqual(len(diff_cpu_out), len(diff_mps_out)) self.assertEqual(len(diff_cpu_arg), len(diff_mps_arg)) if len(diff_cpu_out) == 0: continue # rand_like does not work with certain dtypes, so cast to double and cast back cpu_grad_outputs = tuple(torch.rand_like(t.to(dtype=torch.double)).to(dtype=dtype) for t in diff_cpu_out) mps_grad_outputs = tuple(t.to("mps") for t in cpu_grad_outputs) # Compare computed gradients with cpu given random grad_output vector # Sometimes when the derivative is 0, we just don't bother creating the graph # allow_unused is needed in those cases. cpu_grad_inputs = torch.autograd.grad(diff_cpu_out, diff_cpu_arg, grad_outputs=cpu_grad_outputs, allow_unused=True) mps_grad_inputs = torch.autograd.grad(diff_mps_out, diff_mps_arg, grad_outputs=mps_grad_outputs, allow_unused=True) self.assertEqual(cpu_grad_inputs, mps_grad_inputs, atol=atol, rtol=rtol)
def req_grad(t): return isinstance(t, torch.Tensor) and t.requires_grad diff_cpu_out = tuple(t for t in cpu_out if req_grad(t)) diff_mps_out = tuple(t for t in mps_out if req_grad(t)) diff_cpu_arg = tuple(t for t in pytree.tree_leaves((cpu_args, cpu_kwargs)) if req_grad(t)) diff_mps_arg = tuple(t for t in pytree.tree_leaves((mps_args, mps_kwargs)) if req_grad(t)) self.assertEqual(len(diff_cpu_out), len(diff_mps_out)) self.assertEqual(len(diff_cpu_arg), len(diff_mps_arg)) if len(diff_cpu_out) == 0: continue # rand_like does not work with certain dtypes, so cast to double and cast back cpu_grad_outputs = tuple(torch.rand_like(t, dtype=torch.double).to(dtype=t.dtype) for t in diff_cpu_out) mps_grad_outputs = tuple(t.to("mps") for t in cpu_grad_outputs) # Compare computed gradients with cpu given random grad_output vector # Sometimes when the derivative is 0, we just don't bother creating the graph # allow_unused is needed in those cases. cpu_grad_inputs = torch.autograd.grad(diff_cpu_out, diff_cpu_arg, grad_outputs=cpu_grad_outputs, allow_unused=True) mps_grad_inputs = torch.autograd.grad(diff_mps_out, diff_mps_arg, grad_outputs=mps_grad_outputs, allow_unused=True) self.assertEqual(cpu_grad_inputs, mps_grad_inputs, atol=atol, rtol=rtol)
import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import pprint import copy import gc import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI, TEST_WITH_UBSAN, dtype_abbrs, skipIfSlowGradcheckEnv, TEST_WITH_ASAN, suppress_warnings) from torch.testing import make_tensor from torch.testing._comparison import TensorLikePair from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests, onlyMPS from torch.testing._internal.common_nn import NNTestCase import numpy as np import torch import torch.utils._pytree as pytree from itertools import product _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning MPS_DTYPES = get_all_dtypes()
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix MPS_DTYPES = get_all_dtypes() MPS_GRAD_DTYPES = [torch.float32, torch.float16]
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
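The req_grad row above replaces pytree.tree_flatten(...)[0] with pytree.tree_leaves(...) and draws grad_outputs with rand_like(t, dtype=torch.double) instead of casting first. A small sketch of the tree_leaves pattern, assuming the private torch.utils._pytree API (which can change between releases):

import torch
import torch.utils._pytree as pytree

args = (torch.randn(2, requires_grad=True), {"w": torch.randn(3), "scale": 2.0})
# tree_leaves(x) returns the flat leaf list directly; it is the first element
# of the (leaves, treespec) pair returned by tree_flatten(x).
leaves = pytree.tree_leaves(args)
diff_args = tuple(t for t in leaves if isinstance(t, torch.Tensor) and t.requires_grad)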
torch
test/test_mps.py
test_error_inputs
if all_forward_pass and generate_new_truth: if dtype_abbrs[dtype] not in self.NEW_ALLOW_LIST[op.name]: self.NEW_ALLOW_LIST[op.name].append(dtype_abbrs[dtype]) # We could write it only once. But I don't know how to detect that the current test is the last one # So each test append to the dict and write it. with open("new_mps_allowlist.txt", "w") as f: pprint.pprint(self.NEW_ALLOW_LIST, stream=f) if all_backward_pass and generate_new_truth and dtype.is_floating_point: if dtype_abbrs[dtype] not in self.NEW_ALLOW_LIST_GRAD[op.name]: self.NEW_ALLOW_LIST_GRAD[op.name].append(dtype_abbrs[dtype]) # We could write it only once. But I don't know how to detect that the current test is the last one # So each test append to the dict and write it. with open("new_mps_allowlist_grad.txt", "w") as f: pprint.pprint(self.NEW_ALLOW_LIST_GRAD, stream=f)
def test_error_inputs(self, device, op): self.assertEqual(device, "mps:0") # TODO: Enable per-sample seed setting and tweak tolerances / fix xfails mps_samples = op.error_inputs(device, set_seed=False) for mps_sample in mps_samples: mps_sample_input = mps_sample.sample_input error_type = mps_sample.error_type error_regex = mps_sample.error_regex mps_args = [mps_sample_input.input] + list(mps_sample_input.args) mps_kwargs = mps_sample_input.kwargs # for tensor_split(), the second tensor arg ("tensor_indices_or_sections") must be on CPU only if (op.name == "tensor_split" and isinstance(mps_args[1], torch.Tensor)): mps_args[1] = mps_args[1].cpu() with self.assertRaisesRegex(error_type, error_regex): op(*mps_args, **mps_kwargs)
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix MPS_DTYPES = get_all_dtypes() MPS_GRAD_DTYPES = [torch.float32, torch.float16] class TestErrorInputs(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
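The added test_error_inputs walks an op's error samples on MPS and asserts that the expected exception type and message are raised. A stripped-down sketch of that assert-raises pattern, with a hypothetical check_error helper standing in for the OpInfo machinery:

import re
import torch

def check_error(fn, args, error_type, error_regex):
    # Call fn with deliberately bad arguments and verify both the exception
    # type and that its message matches the expected pattern.
    try:
        fn(*args)
    except error_type as e:
        assert re.search(error_regex, str(e)), f"unexpected message: {e}"
    else:
        raise AssertionError("expected an error, but none was raised")

# Reshaping a 2-element tensor to 3 elements must fail with a clear message.
check_error(torch.reshape, (torch.zeros(2), (3,)), RuntimeError, "invalid for input of size")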
torch
test/test_mps.py
test_tensor_scalar_binops
def test_tensor_scalar_binops(self): # Regression test for https://github.com/pytorch/pytorch/issues/119088 def to_cpu(x): return x.cpu() if isinstance(x, torch.Tensor) else x # Allocate tensors on mps with torch.device("mps"): inputs = [torch.rand(2, dtype=dtype) for dtype in [torch.float, torch.half, torch.cfloat]] self.assertTrue(all(x.device.type == "mps" for x in inputs)) # Add scalars inputs.extend([7, 3.14, 2 + 3j, torch.tensor(4 + 5j, dtype=torch.chalf)]) # Iterate over all permutations of types(int, float, complex, half) and ops (excluding div) for x, y in itertools.product(inputs, inputs): for op_name in ["__add__", "__sub__", "__mul__"]: x_cpu, y_cpu = map(to_cpu, (x, y)) res = getattr(x, op_name)(y) res_cpu = getattr(x_cpu, op_name)(y_cpu) self.assertEqual(to_cpu(res), res_cpu, f"{op_name}({x}, {y}) produces different results {res} vs {res_cpu}") # Copied from `TestCommon` in `test_ops.py`, just enough to duplicate the `test_numpy_ref` for MPS
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix MPS_DTYPES = get_all_dtypes() MPS_GRAD_DTYPES = [torch.float32, torch.float16] class TestComplex(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
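The added test_tensor_scalar_binops checks that arithmetic between MPS tensors and Python scalars matches CPU (the regression noted in pytorch/pytorch#119088). One such comparison in isolation, assuming an MPS build with complex tensor support:

import torch

x = torch.rand(2, dtype=torch.cfloat, device="mps")
scalar = 2 + 3j
# Tensor-scalar binops on MPS should agree with the same op computed on CPU.
torch.testing.assert_close((x * scalar).cpu(), x.cpu() * scalar)
torch.testing.assert_close((x + scalar).cpu(), x.cpu() + scalar)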
torch
test/test_mps.py
test_lstm_backward
def test_lstm_backward(self, device="mps", dtype=torch.float32): for layers in [1] if product_version < 13.0 else [1, 2, 5]: lstm = nn.LSTM(2, 4, layers) # initialized globally for consistent parameters init lstm.train() def get_results(device, inp, hx, cx): rnn = lstm.to(device) inp, hx, cx = inp.to(device), hx.to(device), cx.to(device) output, _ = rnn(inp, (hx, cx)) f = output.sum() param_names, params = zip(*rnn.named_parameters()) param_grads = zip(param_names, torch.autograd.grad(f, params, retain_graph=True)) input_grad, hx_grad, cx_grad = torch.autograd.grad(f, [inp, hx, cx]) return output, param_grads, input_grad, hx_grad, cx_grad inp = torch.randn((5, 3, 2), requires_grad=True, dtype=dtype, device=device) hx = torch.randn((layers, 3, 4), requires_grad=True, dtype=dtype, device=device) cx = torch.randn((layers, 3, 4), requires_grad=True, dtype=dtype, device=device) cpu_output, cpu_weights_grad, cpu_input_grad, cpu_hx_grad, cpu_cx_grad = get_results("cpu", inp, hx, cx) mps_output, mps_weights_grad, mps_input_grad, mps_hx_grad, mps_cx_grad = get_results(device, inp, hx, cx) self.assertEqual(cpu_hx_grad, mps_hx_grad) self.assertEqual(cpu_cx_grad, mps_cx_grad) self.assertEqual(cpu_output, mps_output) self.assertEqual(cpu_input_grad, mps_input_grad) for (cpu_name, cpu_weight_grad), (mps_name, mps_weight_grad) in zip(cpu_weights_grad, mps_weights_grad): self.assertEqual(cpu_weight_grad, mps_weight_grad, f"mismatch in cpu:{cpu_name} vs mps:{mps_name}") # test batch_first backward lstm = nn.LSTM(2, 4, layers, batch_first=True) lstm.train() hx = torch.randn((layers, 5, 4), requires_grad=True, dtype=dtype, device=device) cx = torch.randn((layers, 5, 4), requires_grad=True, dtype=dtype, device=device) cpu_output, cpu_weights_grad, cpu_input_grad, cpu_hx_grad, cpu_cx_grad = get_results("cpu", inp, hx, cx) mps_output, mps_weights_grad, mps_input_grad, mps_hx_grad, mps_cx_grad = get_results(device, inp, hx, cx) self.assertEqual(cpu_hx_grad, mps_hx_grad) self.assertEqual(cpu_cx_grad, mps_cx_grad) self.assertEqual(cpu_output, mps_output) self.assertEqual(cpu_input_grad, mps_input_grad) for (cpu_name, cpu_weight_grad), (mps_name, mps_weight_grad) in zip(cpu_weights_grad, mps_weights_grad): self.assertEqual(cpu_weight_grad, mps_weight_grad, f"mismatch in cpu:{cpu_name} vs mps:{mps_name}")
def test_lstm_backward(self, device="mps", dtype=torch.float32): for num_layers in [1, 2, 5]: for test_options in self.LSTM_TEST_CASES: self._lstm_helper(num_layers=num_layers, dtype=dtype, device=device, backward=True, **test_options)
import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import pprint import copy import gc import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI, TEST_WITH_UBSAN, dtype_abbrs, skipIfSlowGradcheckEnv, TEST_WITH_ASAN, suppress_warnings) from torch.testing import make_tensor from torch.testing._comparison import TensorLikePair from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests, onlyMPS from torch.testing._internal.common_nn import NNTestCase import numpy as np import torch import torch.utils._pytree as pytree from itertools import product _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning class TestRNNMPS(TestCaseMPS):
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix class TestRNNMPS(TestCaseMPS):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
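The reworked test_lstm_backward delegates to a shared _lstm_helper over LSTM_TEST_CASES; the before column shows the underlying CPU-vs-MPS comparison. A condensed sketch of that comparison (names and tolerances here are illustrative, not the helper's actual signature):

import torch
import torch.nn as nn

lstm = nn.LSTM(2, 4, num_layers=2)   # same parameter init reused on both devices
inp = torch.randn(5, 3, 2)
hx, cx = torch.randn(2, 3, 4), torch.randn(2, 3, 4)

def run(device):
    rnn = lstm.to(device)
    i, h, c = (t.detach().to(device).requires_grad_() for t in (inp, hx, cx))
    out, _ = rnn(i, (h, c))
    out.sum().backward()
    return out.detach().cpu(), i.grad.cpu(), h.grad.cpu(), c.grad.cpu()

for got, expected in zip(run("mps"), run("cpu")):
    torch.testing.assert_close(got, expected, atol=1e-5, rtol=1e-5)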
torch
test/test_mps.py
test
def test(N, C, H, W, mode, padding_mode, align_corners, input_requires_grad): def test_shape(N, C, IH, IW, H, W, mode, padding_mode, align_corners): for grid_dim_contig_order in [(0, 1, 2, 3), (0, 3, 1, 2), (3, 0, 1, 2), (0, 2, 1, 3)]: # grid_dim_contig_order specifies the dimension order that can # make grid to be contiguous. # i.e., grid.permute(grid_dim_contig_order) is contiguous. # e.g., with grid_dim_contig_order=[0, 3, 1, 2], grid should be # initialized with contiguous tensor of shape [N, 2, H, W] # and permuted to [N, H, W, 2] afterwards. grid_shape = [N, H, W, 2] grid_init_shape = [grid_shape[d] for d in grid_dim_contig_order] grid_fwd_permute = [None, None, None, None] for i, d in enumerate(grid_dim_contig_order): grid_fwd_permute[d] = i def get_grid(device='cpu', data=None): if data is not None: assert list(data.shape) == grid_shape data = data.permute(grid_dim_contig_order).to(device) else: data = torch.randn(grid_init_shape, device=device) grid = data.permute(grid_fwd_permute) assert grid.permute(grid_dim_contig_order).is_contiguous() return grid input_cpu = torch.randn(C, N, IH, IW).transpose(0, 1).requires_grad_(input_requires_grad) grid_cpu = get_grid().requires_grad_() out_cpu = F.grid_sample(input_cpu, grid_cpu, mode=mode, padding_mode=padding_mode, align_corners=align_corners) self.assertTrue(out_cpu.size() == torch.Size([N, C, H, W])) gradients = torch.randn_like(out_cpu) out_cpu.backward(gradients) # Compare against unvectorized CPU fallback # NOTE [ grid_sample CPU fallback ] # grid_sample uses AVX for 2d images, but that requires 32-bit indexing for # 32-bit floats. So we also have a fallback that is used only for float tensors # requiring 64-bit indexing. That requires too much memory to run on CI, so we # also export the fallback and test it here to ensure feature parity with # the vectorized version. 
input_fallback = input_cpu.float().detach_().requires_grad_() grid_fallback = grid_cpu.float().detach_().requires_grad_() out_fallback = torch._grid_sampler_2d_cpu_fallback( input_fallback, grid_fallback, F.GRID_SAMPLE_INTERPOLATION_MODES[mode], F.GRID_SAMPLE_PADDING_MODES[padding_mode], align_corners) self.assertEqual(out_fallback, out_cpu.float(), atol=1e-5, rtol=5e-5) out_fallback.backward(gradients.float()) if input_requires_grad: self.assertEqual(input_fallback.grad, input_cpu.grad.float(), atol=1e-4, rtol=5e-5) self.assertEqual(grid_fallback.grad, grid_cpu.grad.float(), atol=1e-4, rtol=5e-5) input_mps = input_cpu.detach().transpose(0, 1).to("mps").transpose(0, 1).requires_grad_(input_requires_grad) grid_mps = get_grid('mps', grid_cpu.detach()).requires_grad_() out_mps = F.grid_sample(input_mps, grid_mps, mode=mode, padding_mode=padding_mode, align_corners=align_corners) self.assertEqual(out_cpu, out_mps) out_mps.backward(gradients.to("mps")) if input_requires_grad: self.assertEqual(input_cpu.grad, input_mps.grad) self.assertEqual(grid_cpu.grad, grid_mps.grad, atol=5e-5, rtol=0) # check that zero-dimensional input strides don't error out base_input = torch.randn(N, C, 1, IW) input_cpu = base_input.expand_as(input_mps).requires_grad_(input_requires_grad) out_cpu = F.grid_sample(input_cpu, grid_cpu, mode=mode, padding_mode=padding_mode, align_corners=align_corners) input_mps = base_input.to("mps").expand_as(input_mps).requires_grad_(input_requires_grad) out_mps = F.grid_sample(input_mps, grid_mps, mode=mode, padding_mode=padding_mode, align_corners=align_corners) self.assertEqual(out_cpu, out_mps) # test same size output test_shape(N, C, H, W, H, W, mode, padding_mode, align_corners) # test larger output N = random.randint(2, 8) C = random.randint(2, 8) IH = random.randint(2, 8) IW = random.randint(2, 8) H = random.randint(IH + 1, 12) W = random.randint(IW + 1, 12) test_shape(N, C, IH, IW, H, W, mode, padding_mode, align_corners) # test smaller output N = random.randint(2, 8) C = random.randint(2, 8) IH = random.randint(2, 8) IW = random.randint(2, 8) H = random.randint(2, IH) W = random.randint(2, IW) test_shape(N, C, IH, IW, H, W, mode, padding_mode, align_corners) # test 1x1 inpput N = random.randint(2, 8) C = random.randint(2, 8) IH = 1 IW = 1 H = random.randint(2, 5) W = random.randint(2, 5) test_shape(N, C, IH, IW, H, W, mode, padding_mode, align_corners) # testing empty grid N = random.randint(2, 8) C = random.randint(2, 8) IH = random.randint(2, 8) IW = random.randint(2, 8) W = random.randint(3, IW + 2) test_shape(N, C, IH, IW, 0, W, mode, padding_mode, align_corners) # testing empty channel N = random.randint(2, 8) IH = random.randint(2, 8) IW = random.randint(2, 8) H = random.randint(3, IH + 2) W = random.randint(3, IW + 2) test_shape(N, 0, IH, IW, H, W, mode, padding_mode, align_corners) # testing empty batch C = random.randint(2, 8) IH = random.randint(2, 8) IW = random.randint(2, 8) H = random.randint(3, IH + 2) W = random.randint(3, IW + 2) test_shape(0, C, IH, IW, H, W, mode, padding_mode, align_corners) for mode in ('bilinear', 'nearest'): for padding_mode in ('zeros', 'reflection'): for align_corners in (True, False): # test known input input = torch.arange(1., 11, device="mps").view(1, 1, 2, 5) grid = torch.tensor( [[[-0.9, -4.1], [0, 0.2000], [1, -1], [-0.333, 1e-6], [0.5, 1.0]], [[-1.0, -0.5], [0, 0.3333], [1, -1], [-0.200, 1e-6], [1.5, 0.5]]], device="mps").view(1, 2, 5, 2) if mode == 'bilinear': if padding_mode == 'zeros': if align_corners: groundtruth = 
torch.tensor( [[0.0000, 6.0000000000, 5.0000, 4.8340, 9.0000], [2.2500, 6.3332500450, 5.0000, 5.1000, 0.0000]], device="mps").view(1, 1, 2, 5) else: groundtruth = torch.tensor( [[0.0000, 6.5000000000, 1.2500, 4.6675000191, 4.6250], [0.5000, 7.1665000916, 1.2500, 5.0000000000, 0.0000]], device="mps").view(1, 1, 2, 5) elif padding_mode == 'border': if align_corners: groundtruth = torch.tensor( [[1.2000, 6.0000000000, 5.0000, 4.8340, 9.0000], [2.2500, 6.3332500450, 5.0000, 5.1000, 8.7500]], device="mps").view(1, 1, 2, 5) else: groundtruth = torch.tensor( [[1.0000, 6.5000000000, 5.0000, 4.6675000191, 9.2500], [1.0000, 7.1665000916, 5.0000, 5.0000000000, 10.0000]], device="mps").view(1, 1, 2, 5) elif padding_mode == 'reflection': if align_corners: groundtruth = torch.tensor( [[3.4500, 6.0000000000, 5.0000, 4.8340, 9.0000], [2.2500, 6.3332500450, 5.0000, 5.1000, 7.7500]], device="mps").view(1, 1, 2, 5) else: groundtruth = torch.tensor( [[3.0000004768, 6.5000000000, 5.0000, 4.6675000191, 9.2500], [1.0000000000, 7.1665000916, 5.0000, 5.0000000000, 9.2500]], device="mps").view(1, 1, 2, 5) else: raise AssertionError("missing groundtruth test for padding mode '{}'".format(padding_mode)) elif mode == 'nearest': if padding_mode == 'zeros': if align_corners: groundtruth = torch.tensor( [[0., 8., 5., 7., 9.], [1., 8., 5., 8., 0.]], device="mps").view(1, 1, 2, 5) else: groundtruth = torch.tensor( [[0., 8., 5., 7., 0.], [1., 8., 5., 8., 0.]], device="mps").view(1, 1, 2, 5) elif padding_mode == 'border': if align_corners: groundtruth = torch.tensor( [[1., 8., 5., 7., 9.], [1., 8., 5., 8., 10.]], device="mps").view(1, 1, 2, 5) else: groundtruth = torch.tensor( [[1., 8., 5., 7., 9.], [1., 8., 5., 8., 10.]], device="mps").view(1, 1, 2, 5) elif padding_mode == 'reflection': if align_corners: groundtruth = torch.tensor( [[1., 8., 5., 7., 9.], [1., 8., 5., 8., 9.]], device="mps").view(1, 1, 2, 5) else: groundtruth = torch.tensor( [[1., 8., 5., 7., 9.], [1., 8., 5., 8., 9.]], device="mps").view(1, 1, 2, 5) else: raise AssertionError("missing groundtruth test for padding mode '{}'".format(padding_mode)) elif mode == 'bicubic': if padding_mode == 'zeros': if align_corners: groundtruth = torch.tensor( [[-0.10424726, 7.1400003, 5.0000, 5.7842274, 9.0000], [2.4492188, 7.4814040, 5.0000, 6.0277520, 0.0000]], device="mps").view(1, 1, 2, 5) else: groundtruth = torch.tensor( [[0.00000, 7.6287503, 1.0625, 5.5977230, 5.3270264], [0.40625, 8.0288770, 1.0625, 5.9375067, -0.3515625]], device="mps").view(1, 1, 2, 5) elif padding_mode == 'border': if align_corners: groundtruth = torch.tensor( [[1.1520010, 6.0599990, 5.0000, 4.870930, 9.0000000], [2.1328125, 6.4258375, 5.0000, 5.076003, 8.8671875]], device="mps").view(1, 1, 2, 5) else: groundtruth = torch.tensor( [[0.894531, 6.6050020, 4.625, 4.7138715, 9.800781], [0.906250, 7.2822485, 4.625, 5.0000052, 10.00000]], device="mps").view(1, 1, 2, 5) elif padding_mode == 'reflection': if align_corners: groundtruth = torch.tensor( [[3.1822524, 6.239998, 5.0000, 4.8709273, 9.00000], [1.7812500, 6.703594, 5.0000, 5.0760007, 8.21875]], device="mps").view(1, 1, 2, 5) else: groundtruth = torch.tensor( [[2.7993753, 6.6050020, 4.25, 4.7138715, 10.269531], [0.8125000, 7.2822485, 4.25, 5.0000052, 9.332031]], device="mps").view(1, 1, 2, 5) else: raise AssertionError("missing groundtruth test for padding mode '{}'".format(padding_mode)) else: raise AssertionError("missing groundtruth test for interpolation mode '{}'".format(mode)) output = F.grid_sample(input, grid, mode=mode, 
padding_mode=padding_mode, align_corners=align_corners) self.assertEqual(output, groundtruth, atol=1e-5, rtol=0, msg="groundtruth comparison failed for mode={}, " "padding_mode={}".format(mode, padding_mode))
def test(N, C, H, W, mode, padding_mode, align_corners, input_requires_grad): def test_shape(N, C, IH, IW, H, W, mode, padding_mode, align_corners): for grid_dim_contig_order in [(0, 1, 2, 3), (0, 3, 1, 2), (3, 0, 1, 2), (0, 2, 1, 3)]: # grid_dim_contig_order specifies the dimension order that can # make grid to be contiguous. # i.e., grid.permute(grid_dim_contig_order) is contiguous. # e.g., with grid_dim_contig_order=[0, 3, 1, 2], grid should be # initialized with contiguous tensor of shape [N, 2, H, W] # and permuted to [N, H, W, 2] afterwards. grid_shape = [N, H, W, 2] grid_init_shape = [grid_shape[d] for d in grid_dim_contig_order] grid_fwd_permute = [None, None, None, None] for i, d in enumerate(grid_dim_contig_order): grid_fwd_permute[d] = i def get_grid(device='cpu', data=None): if data is not None: assert list(data.shape) == grid_shape data = data.permute(grid_dim_contig_order).to(device) else: data = torch.randn(grid_init_shape, device=device) grid = data.permute(grid_fwd_permute) assert grid.permute(grid_dim_contig_order).is_contiguous() return grid input_cpu = torch.randn(C, N, IH, IW).transpose(0, 1).requires_grad_(input_requires_grad) grid_cpu = get_grid().requires_grad_() out_cpu = F.grid_sample(input_cpu, grid_cpu, mode=mode, padding_mode=padding_mode, align_corners=align_corners) self.assertEqual(out_cpu.size(), torch.Size([N, C, H, W])) gradients = torch.randn_like(out_cpu) out_cpu.backward(gradients) # Compare against unvectorized CPU fallback # NOTE [ grid_sample CPU fallback ] # grid_sample uses AVX for 2d images, but that requires 32-bit indexing for # 32-bit floats. So we also have a fallback that is used only for float tensors # requiring 64-bit indexing. That requires too much memory to run on CI, so we # also export the fallback and test it here to ensure feature parity with # the vectorized version. 
input_fallback = input_cpu.float().detach_().requires_grad_() grid_fallback = grid_cpu.float().detach_().requires_grad_() out_fallback = torch._grid_sampler_2d_cpu_fallback( input_fallback, grid_fallback, F.GRID_SAMPLE_INTERPOLATION_MODES[mode], F.GRID_SAMPLE_PADDING_MODES[padding_mode], align_corners) self.assertEqual(out_fallback, out_cpu.float(), atol=1e-5, rtol=5e-5) out_fallback.backward(gradients.float()) if input_requires_grad: self.assertEqual(input_fallback.grad, input_cpu.grad.float(), atol=1e-4, rtol=5e-5) self.assertEqual(grid_fallback.grad, grid_cpu.grad.float(), atol=1e-4, rtol=5e-5) input_mps = input_cpu.detach().transpose(0, 1).to("mps").transpose(0, 1).requires_grad_(input_requires_grad) grid_mps = get_grid('mps', grid_cpu.detach()).requires_grad_() out_mps = F.grid_sample(input_mps, grid_mps, mode=mode, padding_mode=padding_mode, align_corners=align_corners) self.assertEqual(out_cpu, out_mps) out_mps.backward(gradients.to("mps")) if input_requires_grad: self.assertEqual(input_cpu.grad, input_mps.grad) self.assertEqual(grid_cpu.grad, grid_mps.grad, atol=5e-5, rtol=0) # check that zero-dimensional input strides don't error out base_input = torch.randn(N, C, 1, IW) input_cpu = base_input.expand_as(input_mps).requires_grad_(input_requires_grad) out_cpu = F.grid_sample(input_cpu, grid_cpu, mode=mode, padding_mode=padding_mode, align_corners=align_corners) input_mps = base_input.to("mps").expand_as(input_mps).requires_grad_(input_requires_grad) out_mps = F.grid_sample(input_mps, grid_mps, mode=mode, padding_mode=padding_mode, align_corners=align_corners) self.assertEqual(out_cpu, out_mps) # test same size output test_shape(N, C, H, W, H, W, mode, padding_mode, align_corners) # test larger output N = random.randint(2, 8) C = random.randint(2, 8) IH = random.randint(2, 8) IW = random.randint(2, 8) H = random.randint(IH + 1, 12) W = random.randint(IW + 1, 12) test_shape(N, C, IH, IW, H, W, mode, padding_mode, align_corners) # test smaller output N = random.randint(2, 8) C = random.randint(2, 8) IH = random.randint(2, 8) IW = random.randint(2, 8) H = random.randint(2, IH) W = random.randint(2, IW) test_shape(N, C, IH, IW, H, W, mode, padding_mode, align_corners) # test 1x1 inpput N = random.randint(2, 8) C = random.randint(2, 8) IH = 1 IW = 1 H = random.randint(2, 5) W = random.randint(2, 5) test_shape(N, C, IH, IW, H, W, mode, padding_mode, align_corners) # testing empty grid N = random.randint(2, 8) C = random.randint(2, 8) IH = random.randint(2, 8) IW = random.randint(2, 8) W = random.randint(3, IW + 2) test_shape(N, C, IH, IW, 0, W, mode, padding_mode, align_corners) # testing empty channel N = random.randint(2, 8) IH = random.randint(2, 8) IW = random.randint(2, 8) H = random.randint(3, IH + 2) W = random.randint(3, IW + 2) test_shape(N, 0, IH, IW, H, W, mode, padding_mode, align_corners) # testing empty batch C = random.randint(2, 8) IH = random.randint(2, 8) IW = random.randint(2, 8) H = random.randint(3, IH + 2) W = random.randint(3, IW + 2) test_shape(0, C, IH, IW, H, W, mode, padding_mode, align_corners) for mode in ('bilinear', 'nearest'): for padding_mode in ('zeros', 'reflection'): for align_corners in (True, False): # test known input input = torch.arange(1., 11, device="mps").view(1, 1, 2, 5) grid = torch.tensor( [[[-0.9, -4.1], [0, 0.2000], [1, -1], [-0.333, 1e-6], [0.5, 1.0]], [[-1.0, -0.5], [0, 0.3333], [1, -1], [-0.200, 1e-6], [1.5, 0.5]]], device="mps").view(1, 2, 5, 2) if mode == 'bilinear': if padding_mode == 'zeros': if align_corners: groundtruth = 
torch.tensor( [[0.0000, 6.0000000000, 5.0000, 4.8340, 9.0000], [2.2500, 6.3332500450, 5.0000, 5.1000, 0.0000]], device="mps").view(1, 1, 2, 5) else: groundtruth = torch.tensor( [[0.0000, 6.5000000000, 1.2500, 4.6675000191, 4.6250], [0.5000, 7.1665000916, 1.2500, 5.0000000000, 0.0000]], device="mps").view(1, 1, 2, 5) elif padding_mode == 'border': if align_corners: groundtruth = torch.tensor( [[1.2000, 6.0000000000, 5.0000, 4.8340, 9.0000], [2.2500, 6.3332500450, 5.0000, 5.1000, 8.7500]], device="mps").view(1, 1, 2, 5) else: groundtruth = torch.tensor( [[1.0000, 6.5000000000, 5.0000, 4.6675000191, 9.2500], [1.0000, 7.1665000916, 5.0000, 5.0000000000, 10.0000]], device="mps").view(1, 1, 2, 5) elif padding_mode == 'reflection': if align_corners: groundtruth = torch.tensor( [[3.4500, 6.0000000000, 5.0000, 4.8340, 9.0000], [2.2500, 6.3332500450, 5.0000, 5.1000, 7.7500]], device="mps").view(1, 1, 2, 5) else: groundtruth = torch.tensor( [[3.0000004768, 6.5000000000, 5.0000, 4.6675000191, 9.2500], [1.0000000000, 7.1665000916, 5.0000, 5.0000000000, 9.2500]], device="mps").view(1, 1, 2, 5) else: raise AssertionError(f"missing groundtruth test for padding mode '{padding_mode}'") elif mode == 'nearest': if padding_mode == 'zeros': if align_corners: groundtruth = torch.tensor( [[0., 8., 5., 7., 9.], [1., 8., 5., 8., 0.]], device="mps").view(1, 1, 2, 5) else: groundtruth = torch.tensor( [[0., 8., 5., 7., 0.], [1., 8., 5., 8., 0.]], device="mps").view(1, 1, 2, 5) elif padding_mode == 'border': if align_corners: groundtruth = torch.tensor( [[1., 8., 5., 7., 9.], [1., 8., 5., 8., 10.]], device="mps").view(1, 1, 2, 5) else: groundtruth = torch.tensor( [[1., 8., 5., 7., 9.], [1., 8., 5., 8., 10.]], device="mps").view(1, 1, 2, 5) elif padding_mode == 'reflection': if align_corners: groundtruth = torch.tensor( [[1., 8., 5., 7., 9.], [1., 8., 5., 8., 9.]], device="mps").view(1, 1, 2, 5) else: groundtruth = torch.tensor( [[1., 8., 5., 7., 9.], [1., 8., 5., 8., 9.]], device="mps").view(1, 1, 2, 5) else: raise AssertionError(f"missing groundtruth test for padding mode '{padding_mode}'") elif mode == 'bicubic': if padding_mode == 'zeros': if align_corners: groundtruth = torch.tensor( [[-0.10424726, 7.1400003, 5.0000, 5.7842274, 9.0000], [2.4492188, 7.4814040, 5.0000, 6.0277520, 0.0000]], device="mps").view(1, 1, 2, 5) else: groundtruth = torch.tensor( [[0.00000, 7.6287503, 1.0625, 5.5977230, 5.3270264], [0.40625, 8.0288770, 1.0625, 5.9375067, -0.3515625]], device="mps").view(1, 1, 2, 5) elif padding_mode == 'border': if align_corners: groundtruth = torch.tensor( [[1.1520010, 6.0599990, 5.0000, 4.870930, 9.0000000], [2.1328125, 6.4258375, 5.0000, 5.076003, 8.8671875]], device="mps").view(1, 1, 2, 5) else: groundtruth = torch.tensor( [[0.894531, 6.6050020, 4.625, 4.7138715, 9.800781], [0.906250, 7.2822485, 4.625, 5.0000052, 10.00000]], device="mps").view(1, 1, 2, 5) elif padding_mode == 'reflection': if align_corners: groundtruth = torch.tensor( [[3.1822524, 6.239998, 5.0000, 4.8709273, 9.00000], [1.7812500, 6.703594, 5.0000, 5.0760007, 8.21875]], device="mps").view(1, 1, 2, 5) else: groundtruth = torch.tensor( [[2.7993753, 6.6050020, 4.25, 4.7138715, 10.269531], [0.8125000, 7.2822485, 4.25, 5.0000052, 9.332031]], device="mps").view(1, 1, 2, 5) else: raise AssertionError(f"missing groundtruth test for padding mode '{padding_mode}'") else: raise AssertionError(f"missing groundtruth test for interpolation mode '{mode}'") output = F.grid_sample(input, grid, mode=mode, padding_mode=padding_mode, 
align_corners=align_corners) self.assertEqual(output, groundtruth, atol=1e-5, rtol=0, msg=f"groundtruth comparison failed for mode={mode}, " f"padding_mode={padding_mode}")
import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import pprint import copy import gc import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI, TEST_WITH_UBSAN, dtype_abbrs, skipIfSlowGradcheckEnv, TEST_WITH_ASAN, suppress_warnings) from torch.testing import make_tensor from torch.testing._comparison import TensorLikePair from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests, onlyMPS from torch.testing._internal.common_nn import NNTestCase import numpy as np import torch import torch.utils._pytree as pytree from itertools import product _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning
import io import platform import sys import math import random import unittest import warnings import subprocess import tempfile import os import copy import gc import threading import torch import torch.nn as nn import torch.nn.functional as F import itertools from collections import defaultdict from torch import inf from torch.nn import Buffer, Parameter from torch.testing._internal import opinfo from torch.testing._internal.common_utils import \ (gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI, NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests) from torch.testing import make_tensor from torch.testing._internal.common_dtype import get_all_dtypes, integral_types import torch.backends.mps from torch.distributions import Uniform, Exponential from functools import partial from torch.testing._internal.common_methods_invocations import ( op_db, DecorateInfo, UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo, ) from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes from torch.testing._internal.common_nn import NNTestCase from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel import numpy as np import torch import torch.utils._pytree as pytree from itertools import product import operator test_consistency_op_db = copy.deepcopy(op_db) test_error_inputs_op_db = copy.deepcopy(op_db) _ref_test_ops = tuple( filter( lambda op: not isinstance( op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo) ) and op.ref is not None, op_db, ) ) product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' import numpy as np import numpy as np import numpy as np import torch from torch.utils.checkpoint import checkpoint import numpy as np from torch.serialization import SourceChangeWarning from torch.serialization import SourceChangeWarning from torch.testing._internal.common_utils import random_hermitian_pd_matrix
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
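The grid_sample row compares MPS results against CPU across shapes, interpolation modes, and padding modes, plus hard-coded ground truths for a known input. The device-parity part in isolation:

import torch
import torch.nn.functional as F

inp = torch.randn(1, 1, 2, 5)
grid = torch.rand(1, 2, 5, 2) * 2 - 1   # sampling coordinates normalized to [-1, 1]
kwargs = dict(mode="bilinear", padding_mode="zeros", align_corners=False)
out_cpu = F.grid_sample(inp, grid, **kwargs)
out_mps = F.grid_sample(inp.to("mps"), grid.to("mps"), **kwargs)
torch.testing.assert_close(out_mps.cpu(), out_cpu, atol=1e-5, rtol=0)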