library
stringclasses 1
value | test_file
stringclasses 785
values | test_function
stringlengths 1
295
| before
stringlengths 0
448k
| after
stringlengths 0
487k
| context_before
stringclasses 947
values | context_after
stringlengths 0
16.3k
| commit_before
stringclasses 1
value | commit_after
stringclasses 1
value | change_type
stringclasses 3
values |
---|---|---|---|---|---|---|---|---|---|
torch
|
test/test_meta.py
|
test_embedding_bag_byte_unpack
|
def test_embedding_bag_byte_unpack(self):
batch_size = 10
num_embeddings = 80
embedding_dim = [128, 256, 512]
res_shape = [[batch_size, num_embeddings, ed] for ed in embedding_dim]
for ed, rs in zip(embedding_dim, res_shape):
packed_weight = torch.randn(batch_size, num_embeddings, ed + 8, dtype=torch.float32)
res = torch.ops.quantized.embedding_bag_byte_unpack(packed_weight.to(device="meta"))
self.assertEqual(res.shape, rs)
self.assertEqual(res.dtype, torch.float32)
self.assertEqual(res.untyped_storage().data_ptr(), 0)
|
import itertools
import torch
import os
import numpy as np
from enum import Enum
from torch.overrides import resolve_name
from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
from torch.utils import _pytree as pytree
from torch._subclasses.meta_utils import MetaConverter, assert_metadata_eq, is_sparse_any
import torch.utils._python_dispatch
from torch._dispatch.python import enable_python_dispatcher
from torch._ops import OpOverload, OpOverloadPacket
from torch.testing import make_tensor
from torch.testing._internal.common_utils import unMarkDynamoStrictTest
from torch.testing._internal.common_utils import (
TestCase,
skipIfCrossRef,
skipIfTorchDynamo,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_TORCHDYNAMO,
run_tests,
dtype_abbrs,
parametrize
)
from torch.testing._internal.common_device_type import (
ops,
instantiate_device_type_tests,
onlyCUDA,
onlyCPU,
OpDTypes,
)
from torch.testing._internal.common_methods_invocations import (
binary_ufuncs, op_db, foreach_unary_op_db, foreach_binary_op_db,
foreach_pointwise_op_db, foreach_reduce_op_db, foreach_other_op_db)
from torch.testing._internal.opinfo.core import S, SampleInput
from torchgen.yaml_utils import YamlLoader
from torchgen.model import OperatorName
import copy
import sys
import yaml
import atexit
import re
from collections import defaultdict
from collections.abc import Iterable
import unittest
import warnings
import weakref
from functools import partial, wraps
bf16 = torch.bfloat16
f64 = torch.float64
f32 = torch.float32
f16 = torch.float16
c32 = torch.complex32
c64 = torch.complex64
c128 = torch.complex128
i8 = torch.int8
i16 = torch.int16
i32 = torch.int32
i64 = torch.int64
b8 = torch.bool
u8 = torch.uint8
u16 = torch.uint16
u32 = torch.uint32
u64 = torch.uint64
foreach_op_db = (
foreach_unary_op_db +
foreach_binary_op_db +
foreach_pointwise_op_db +
foreach_reduce_op_db +
foreach_other_op_db
)
aten = torch.ops.aten
CHECK_STRIDES = {
torch.Tensor.__getitem__,
}
CHECK_ALL_STRIDES = {
aten.unsqueeze.default
}
CHECK_STRIDES_SKIPS = {
aten._conj_physical.default,
aten._fft_c2c.default,
aten._fft_c2r.default,
aten._fft_r2c.default,
aten._linalg_svd.default,
aten.binary_cross_entropy.default,
aten.complex.default,
aten.polar.default,
aten.copysign.Tensor,
aten.div.Tensor_mode,
aten.floor_divide.default,
aten.heaviside.default,
aten.lerp.Scalar,
aten.lerp.Tensor,
aten.logaddexp.default,
aten.logical_and.default,
aten.logical_or.default,
aten.logical_xor.default,
aten.pow.Scalar,
aten.prelu.default,
aten.special_xlog1py.default,
aten.xlogy.Tensor,
aten.nll_loss2d_forward.default,
# channel_last and channel_last_3d related failures
aten.convolution.default,
# following ops fails if include_storage_offset = True, but these are a bit edge casey
# we should still fix them, leaving them here for tracking.
# aten._reshape_alias.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_matmul_cuda_float32
# aten.view.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_unflatten_cuda_float32
}
CHECK_CONJ_SKIPS = {
# The conj bit is not copied, see:
# https://github.com/pytorch/pytorch/pull/101836
aten.linalg_lu_solve.out,
}
COLLECT_EXPECT = os.getenv('PYTORCH_COLLECT_EXPECT', '0') == '1'
seen_succeeded = {}
seen_failed = {}
failed_reasons = defaultdict(set)
TestExpect = Enum("TestExpect", ("SUCCESS", "XFAILURE", "SKIP"))
RE_NOT_IMPLEMENTED_MSG = re.compile(r"Could not run '([^']+)' with arguments ")
meta_function_expected_failures = {
torch.Tensor.to_sparse : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.allclose : {f64, f16, c128, c64, bf16, f32},
torch.argwhere : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.combinations : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.corrcoef : {f64, i32, c128, i64, i16, u8, c64, bf16, f16, i8, f32},
torch.cov : {f64, i32, c128, i64, i16, u8, c64, bf16, i8, f32, f16},
torch.functional.istft : {f64, c64, c128, f32},
torch.geqrf : {f64, c64, c128, f32},
torch.masked_select : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.item : {f64, i32, c128, i64, i16, f16, u8, c32, c64, bf16, b8, i8, f32},
torch.bincount : {i32, i64, u8, i16, i8},
torch.functional.unique : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
torch.functional.unique_consecutive : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
torch.histogram : {f64, f32},
torch.histogramdd : {f64, f32},
torch.nn.functional.ctc_loss : {f64, f32},
torch.nn.functional.gaussian_nll_loss : {f16, f64, bf16, f32},
torch.linalg.lstsq : {f64, f32, c128, c64},
}
meta_function_expected_failures_conditional = {
torch.repeat_interleave : (lambda dtype, *args, **kwargs: not isinstance(kwargs.get("repeats", None), int)),
}
meta_function_skips = {
torch.Tensor.__rmatmul__ : {bf16, c128, f64, f32, f16, c64},
torch.Tensor.matmul : {f64, f32, c128, c64},
torch.functional.atleast_2d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.atleast_3d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.cartesian_prod : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.einsum : {bf16, c128, f64, f32, f16, c64},
torch.inner : {f16, bf16, i8, i64, u8, c128, f64, i16, f32, i32, c64},
torch.linalg.matrix_norm : {c128, f32, c64, f64},
torch.linalg.matrix_rank : {c128, c64},
torch.linalg.svd : {c128, c64},
torch.matmul : {bf16, c128, f64, f32, f16, c64},
torch.nanquantile : {f64, f32},
torch.narrow : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c32, c64},
torch.nn.functional.batch_norm : {f64, f32},
torch.nn.functional.binary_cross_entropy : {bf16, f64, f32, f16},
torch.nn.functional.dropout3d : {bf16, f64, f32, f16},
torch.nn.functional.local_response_norm : {bf16, f64, f32, f16},
torch.svd : {c128, c64},
torch.take_along_dim : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.vstack : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.diff : {b8},
torch.equal : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.nanmean : {bf16, f64, f32, f16, c32, c64, c128},
torch.nn.functional.cross_entropy : {bf16, f64, f32},
torch.nn.functional.nll_loss : {bf16, f64, f32},
torch.linalg.cond : {c128, c64, f32, f64},
torch.linalg.vecdot : {bf16, f64, f32, f16},
torch.empty : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.Tensor.addbmm_: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
torch.nn.functional.one_hot : {i64},
}
meta_function_device_expected_failures = defaultdict(dict)
meta_function_device_expected_failures_only_outplace = defaultdict(dict)
meta_function_device_skips = defaultdict(dict)
meta_function_device_expected_failures['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
torch.native_batch_norm: {bf16, f16},
torch._native_batch_norm_legit: {bf16, f16},
torch.ops.aten._batch_norm_with_update: {bf16, f16},
torch.native_layer_norm: {bf16, f16},
}
meta_function_device_expected_failures['cuda'] = {
torch.corrcoef: {bf16, f16}, # aten::_local_scalar_dense
torch.cov: {f16}, # aten::_local_scalar_dense
torch.functional.unique: {f16}, # aten::_unique2, aten::unique_dim
torch.functional.unique_consecutive: {f16}, # aten::unique_consecutive
torch.geqrf: {f32, f64}, # aten::geqrf
}
meta_function_device_skips['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
torch.native_batch_norm: {f32, f64},
torch._native_batch_norm_legit: {f32, f64},
torch.ops.aten._batch_norm_with_update: {f32, f64},
}
meta_function_device_skips['cuda'] = {
torch.inner: {f16},
torch.linalg.matrix_rank: {f32, f64},
torch.linalg.svd: {f32, f64},
torch.nn.functional.cross_entropy: {f16},
torch.nn.functional.interpolate: {f16},
torch.nn.functional.nll_loss: {f16},
torch.svd: {f32, f64},
}
meta_dispatch_expected_failures = {
aten.allclose.default: {f16, bf16, f32, f64, c64, c128}, # NotImplementedError: 'aten::_local_scalar_dense'
aten.geqrf.default : {c64, c128, f64, f32},
aten.linalg_lstsq.default : {c64, c128, f64, f32},
aten.masked_select.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.masked_select.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.nonzero.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten.nonzero.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten._to_sparse.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._to_sparse.sparse_dim : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._ctc_loss.Tensor : {f32, f64}, # Shape of second output depends on data.
aten._histogramdd_bin_edges.default : {f32, f64},
aten._histogramdd_from_bin_cts.default : {f32, f64},
aten._histogramdd_from_bin_tensors.default : {f32, f64},
aten._local_scalar_dense.default : {c32, c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._unique2.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.bincount.default : {i64, i8, i32, i16, u8},
aten.equal.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.histogram.bin_ct : {f32, f64},
aten.histogram.bins_tensor : {f32, f64},
aten.unique_consecutive.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.unique_dim.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.upsample_nearest3d.vec : {bf16, f32, f64, u8},
}
meta_dispatch_skips = {
aten.index.Tensor: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128}, # at::nonzero doesn't have a Meta function
aten._to_copy.default: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128},
aten.empty.memory_format: {b8, bf16, c128, c64, c32, f16, f32, f64, i16, i32, i64, i8, u8},
aten.addbmm_.default: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
}
meta_dispatch_early_skips = set({
torch.Tensor.float_power_,
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumprod_,
torch.Tensor.cumsum_,
})
meta_inplace_skips = set({
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumprod_,
torch.Tensor.cumsum_,
})
meta_dispatch_device_expected_failures = defaultdict(dict)
meta_dispatch_device_skips = defaultdict(dict)
meta_dispatch_device_expected_failures['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
aten.native_batch_norm.default: {bf16, f16},
aten._native_batch_norm_legit.default: {bf16, f16},
aten._native_batch_norm_legit.no_stats: {bf16, f16},
aten._batch_norm_with_update.default: {bf16, f16},
aten.native_layer_norm.default: {bf16, f16},
}
meta_dispatch_device_expected_failures['cuda'] = {
aten._unique2.default: {f16}, # aten::_unique2
aten._use_cudnn_ctc_loss.default: {f32, f64}, # aten::_use_cudnn_ctc_loss
aten._use_cudnn_ctc_loss.Tensor: {f32, f64}, # aten::_use_cudnn_ctc_loss.Tensor
aten.cudnn_grid_sampler.default: {f16, f32, f64}, # aten::cudnn_grid_sampler
aten.geqrf.default: {f32, f64}, # aten::geqrf
aten.linalg_eigvalsh.out: {f32, f64}, # aten::linalg_eigvalsh.out
aten.log_sigmoid_forward.default: {bf16, f16, f64, f32},
aten.log_sigmoid_forward.output : {bf16, f16, f64, f32}, # aten::log_sigmoid_forward.output
aten.unique_consecutive.default: {f16}, # aten::unique_consecutive
aten.unique_dim.default: {f16}, # aten::unique_dim
aten.upsample_nearest3d.vec: {f16}, # aten::upsample_nearest3d.vec
}
meta_dispatch_device_skips['cpu'] = {
aten._embedding_bag_forward_only.default: {bf16, f16, f32, f64},
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
aten.native_batch_norm.default: {f32, f64},
aten._native_batch_norm_legit.default: {f32, f64},
aten._native_batch_norm_legit.no_stats: {f32, f64},
aten._batch_norm_with_update.default: {f32, f64},
# If the computation dtype is different from the input
# dtype this will fail. CPU execution may also have a
# a different output from other devices.
aten.native_batch_norm.out: {bf16, f16, f32, f64}
}
meta_dispatch_device_skips['cuda'] = {
aten._conj.default: {c32, f16}, # file issue
aten._linalg_svd.default: {c64, c128}, # aten::linalg_eigvalsh.out
aten.cudnn_batch_norm.default: {f32, f64},
aten.log_softmax.int : {c32, c64},
aten.softmax.int : {c32, c64},
aten.softmax.int : {c32, c64},
# ROCm stuff; technically this should be expected failure but it's
# not worth it; these should get unified anyway
aten.miopen_batch_norm.default: {f32},
}
@unMarkDynamoStrictTest
class TestMeta(TestCase):
from torch.testing._internal.common_methods_invocations import sample_inputs_layer_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_group_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm
from torch.ao.quantization import FusedMovingAvgObsFakeQuantize
import io
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_meta.py
|
f
|
def f():
input = torch.randn([8, 16], device='meta')
index = torch.tensor([2, 1, 6, 7, 3, 1, 7, 5, 6, 7], device='meta')
out = torch.empty([10, 16], device='meta')
return torch.index_select(input=input, dim=0, index=index, out=out)
with enable_python_dispatcher():
out = f()
self.assertEqual(out.shape, [10, 16])
|
import itertools
import torch
import os
import numpy as np
from enum import Enum
from torch.overrides import resolve_name
from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
from torch.utils import _pytree as pytree
from torch._subclasses.meta_utils import MetaConverter, assert_metadata_eq, is_sparse_any
import torch.utils._python_dispatch
from torch._dispatch.python import enable_python_dispatcher
from torch._ops import OpOverload, OpOverloadPacket
from torch.testing import make_tensor
from torch.testing._internal.common_utils import unMarkDynamoStrictTest
from torch.testing._internal.common_utils import (
TestCase,
skipIfCrossRef,
skipIfTorchDynamo,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_TORCHDYNAMO,
run_tests,
dtype_abbrs,
parametrize
)
from torch.testing._internal.common_device_type import (
ops,
instantiate_device_type_tests,
onlyCUDA,
onlyCPU,
OpDTypes,
)
from torch.testing._internal.common_methods_invocations import (
binary_ufuncs, op_db, foreach_unary_op_db, foreach_binary_op_db,
foreach_pointwise_op_db, foreach_reduce_op_db, foreach_other_op_db)
from torch.testing._internal.opinfo.core import S, SampleInput
from torchgen.yaml_utils import YamlLoader
from torchgen.model import OperatorName
import copy
import sys
import yaml
import atexit
import re
from collections import defaultdict
from collections.abc import Iterable
import unittest
import warnings
import weakref
from functools import partial, wraps
bf16 = torch.bfloat16
f64 = torch.float64
f32 = torch.float32
f16 = torch.float16
c32 = torch.complex32
c64 = torch.complex64
c128 = torch.complex128
i8 = torch.int8
i16 = torch.int16
i32 = torch.int32
i64 = torch.int64
b8 = torch.bool
u8 = torch.uint8
u16 = torch.uint16
u32 = torch.uint32
u64 = torch.uint64
foreach_op_db = (
foreach_unary_op_db +
foreach_binary_op_db +
foreach_pointwise_op_db +
foreach_reduce_op_db +
foreach_other_op_db
)
aten = torch.ops.aten
CHECK_STRIDES = {
torch.Tensor.__getitem__,
}
CHECK_ALL_STRIDES = {
aten.unsqueeze.default
}
CHECK_STRIDES_SKIPS = {
aten._conj_physical.default,
aten._fft_c2c.default,
aten._fft_c2r.default,
aten._fft_r2c.default,
aten._linalg_svd.default,
aten.binary_cross_entropy.default,
aten.complex.default,
aten.polar.default,
aten.copysign.Tensor,
aten.div.Tensor_mode,
aten.floor_divide.default,
aten.heaviside.default,
aten.lerp.Scalar,
aten.lerp.Tensor,
aten.logaddexp.default,
aten.logical_and.default,
aten.logical_or.default,
aten.logical_xor.default,
aten.pow.Scalar,
aten.prelu.default,
aten.special_xlog1py.default,
aten.xlogy.Tensor,
aten.nll_loss2d_forward.default,
# channel_last and channel_last_3d related failures
aten.convolution.default,
# following ops fails if include_storage_offset = True, but these are a bit edge casey
# we should still fix them, leaving them here for tracking.
# aten._reshape_alias.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_matmul_cuda_float32
# aten.view.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_unflatten_cuda_float32
}
CHECK_CONJ_SKIPS = {
# The conj bit is not copied, see:
# https://github.com/pytorch/pytorch/pull/101836
aten.linalg_lu_solve.out,
}
COLLECT_EXPECT = os.getenv('PYTORCH_COLLECT_EXPECT', '0') == '1'
seen_succeeded = {}
seen_failed = {}
failed_reasons = defaultdict(set)
TestExpect = Enum("TestExpect", ("SUCCESS", "XFAILURE", "SKIP"))
RE_NOT_IMPLEMENTED_MSG = re.compile(r"Could not run '([^']+)' with arguments ")
meta_function_expected_failures = {
torch.Tensor.to_sparse : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.allclose : {f64, f16, c128, c64, bf16, f32},
torch.argwhere : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.combinations : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.corrcoef : {f64, i32, c128, i64, i16, u8, c64, bf16, f16, i8, f32},
torch.cov : {f64, i32, c128, i64, i16, u8, c64, bf16, i8, f32, f16},
torch.functional.istft : {f64, c64, c128, f32},
torch.geqrf : {f64, c64, c128, f32},
torch.masked_select : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.item : {f64, i32, c128, i64, i16, f16, u8, c32, c64, bf16, b8, i8, f32},
torch.bincount : {i32, i64, u8, i16, i8},
torch.functional.unique : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
torch.functional.unique_consecutive : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
torch.histogram : {f64, f32},
torch.histogramdd : {f64, f32},
torch.nn.functional.ctc_loss : {f64, f32},
torch.nn.functional.gaussian_nll_loss : {f16, f64, bf16, f32},
torch.linalg.lstsq : {f64, f32, c128, c64},
}
meta_function_expected_failures_conditional = {
torch.repeat_interleave : (lambda dtype, *args, **kwargs: not isinstance(kwargs.get("repeats", None), int)),
}
meta_function_skips = {
torch.Tensor.__rmatmul__ : {bf16, c128, f64, f32, f16, c64},
torch.Tensor.matmul : {f64, f32, c128, c64},
torch.functional.atleast_2d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.atleast_3d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.cartesian_prod : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.einsum : {bf16, c128, f64, f32, f16, c64},
torch.inner : {f16, bf16, i8, i64, u8, c128, f64, i16, f32, i32, c64},
torch.linalg.matrix_norm : {c128, f32, c64, f64},
torch.linalg.matrix_rank : {c128, c64},
torch.linalg.svd : {c128, c64},
torch.matmul : {bf16, c128, f64, f32, f16, c64},
torch.nanquantile : {f64, f32},
torch.narrow : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c32, c64},
torch.nn.functional.batch_norm : {f64, f32},
torch.nn.functional.binary_cross_entropy : {bf16, f64, f32, f16},
torch.nn.functional.dropout3d : {bf16, f64, f32, f16},
torch.nn.functional.local_response_norm : {bf16, f64, f32, f16},
torch.svd : {c128, c64},
torch.take_along_dim : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.vstack : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.diff : {b8},
torch.equal : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.nanmean : {bf16, f64, f32, f16, c32, c64, c128},
torch.nn.functional.cross_entropy : {bf16, f64, f32},
torch.nn.functional.nll_loss : {bf16, f64, f32},
torch.linalg.cond : {c128, c64, f32, f64},
torch.linalg.vecdot : {bf16, f64, f32, f16},
torch.empty : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.Tensor.addbmm_: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
torch.nn.functional.one_hot : {i64},
}
meta_function_device_expected_failures = defaultdict(dict)
meta_function_device_expected_failures_only_outplace = defaultdict(dict)
meta_function_device_skips = defaultdict(dict)
meta_function_device_expected_failures['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
torch.native_batch_norm: {bf16, f16},
torch._native_batch_norm_legit: {bf16, f16},
torch.ops.aten._batch_norm_with_update: {bf16, f16},
torch.native_layer_norm: {bf16, f16},
}
meta_function_device_expected_failures['cuda'] = {
torch.corrcoef: {bf16, f16}, # aten::_local_scalar_dense
torch.cov: {f16}, # aten::_local_scalar_dense
torch.functional.unique: {f16}, # aten::_unique2, aten::unique_dim
torch.functional.unique_consecutive: {f16}, # aten::unique_consecutive
torch.geqrf: {f32, f64}, # aten::geqrf
}
meta_function_device_skips['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
torch.native_batch_norm: {f32, f64},
torch._native_batch_norm_legit: {f32, f64},
torch.ops.aten._batch_norm_with_update: {f32, f64},
}
meta_function_device_skips['cuda'] = {
torch.inner: {f16},
torch.linalg.matrix_rank: {f32, f64},
torch.linalg.svd: {f32, f64},
torch.nn.functional.cross_entropy: {f16},
torch.nn.functional.interpolate: {f16},
torch.nn.functional.nll_loss: {f16},
torch.svd: {f32, f64},
}
meta_dispatch_expected_failures = {
aten.allclose.default: {f16, bf16, f32, f64, c64, c128}, # NotImplementedError: 'aten::_local_scalar_dense'
aten.geqrf.default : {c64, c128, f64, f32},
aten.linalg_lstsq.default : {c64, c128, f64, f32},
aten.masked_select.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.masked_select.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.nonzero.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten.nonzero.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten._to_sparse.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._to_sparse.sparse_dim : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._ctc_loss.Tensor : {f32, f64}, # Shape of second output depends on data.
aten._histogramdd_bin_edges.default : {f32, f64},
aten._histogramdd_from_bin_cts.default : {f32, f64},
aten._histogramdd_from_bin_tensors.default : {f32, f64},
aten._local_scalar_dense.default : {c32, c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._unique2.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.bincount.default : {i64, i8, i32, i16, u8},
aten.equal.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.histogram.bin_ct : {f32, f64},
aten.histogram.bins_tensor : {f32, f64},
aten.unique_consecutive.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.unique_dim.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.upsample_nearest3d.vec : {bf16, f32, f64, u8},
}
meta_dispatch_skips = {
aten.index.Tensor: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128}, # at::nonzero doesn't have a Meta function
aten._to_copy.default: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128},
aten.empty.memory_format: {b8, bf16, c128, c64, c32, f16, f32, f64, i16, i32, i64, i8, u8},
aten.addbmm_.default: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
}
meta_dispatch_early_skips = set({
torch.Tensor.float_power_,
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumprod_,
torch.Tensor.cumsum_,
})
meta_inplace_skips = set({
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumprod_,
torch.Tensor.cumsum_,
})
meta_dispatch_device_expected_failures = defaultdict(dict)
meta_dispatch_device_skips = defaultdict(dict)
meta_dispatch_device_expected_failures['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
aten.native_batch_norm.default: {bf16, f16},
aten._native_batch_norm_legit.default: {bf16, f16},
aten._native_batch_norm_legit.no_stats: {bf16, f16},
aten._batch_norm_with_update.default: {bf16, f16},
aten.native_layer_norm.default: {bf16, f16},
}
meta_dispatch_device_expected_failures['cuda'] = {
aten._unique2.default: {f16}, # aten::_unique2
aten._use_cudnn_ctc_loss.default: {f32, f64}, # aten::_use_cudnn_ctc_loss
aten._use_cudnn_ctc_loss.Tensor: {f32, f64}, # aten::_use_cudnn_ctc_loss.Tensor
aten.cudnn_grid_sampler.default: {f16, f32, f64}, # aten::cudnn_grid_sampler
aten.geqrf.default: {f32, f64}, # aten::geqrf
aten.linalg_eigvalsh.out: {f32, f64}, # aten::linalg_eigvalsh.out
aten.log_sigmoid_forward.default: {bf16, f16, f64, f32},
aten.log_sigmoid_forward.output : {bf16, f16, f64, f32}, # aten::log_sigmoid_forward.output
aten.unique_consecutive.default: {f16}, # aten::unique_consecutive
aten.unique_dim.default: {f16}, # aten::unique_dim
aten.upsample_nearest3d.vec: {f16}, # aten::upsample_nearest3d.vec
}
meta_dispatch_device_skips['cpu'] = {
aten._embedding_bag_forward_only.default: {bf16, f16, f32, f64},
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
aten.native_batch_norm.default: {f32, f64},
aten._native_batch_norm_legit.default: {f32, f64},
aten._native_batch_norm_legit.no_stats: {f32, f64},
aten._batch_norm_with_update.default: {f32, f64},
# If the computation dtype is different from the input
# dtype this will fail. CPU execution may also have a
# a different output from other devices.
aten.native_batch_norm.out: {bf16, f16, f32, f64}
}
meta_dispatch_device_skips['cuda'] = {
aten._conj.default: {c32, f16}, # file issue
aten._linalg_svd.default: {c64, c128}, # aten::linalg_eigvalsh.out
aten.cudnn_batch_norm.default: {f32, f64},
aten.log_softmax.int : {c32, c64},
aten.softmax.int : {c32, c64},
aten.softmax.int : {c32, c64},
# ROCm stuff; technically this should be expected failure but it's
# not worth it; these should get unified anyway
aten.miopen_batch_norm.default: {f32},
}
from torch.testing._internal.common_methods_invocations import sample_inputs_layer_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_group_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm
from torch.ao.quantization import FusedMovingAvgObsFakeQuantize
import io
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_meta.py
|
test_local_scalar_dense_call
|
instantiate_device_type_tests(TestMeta, globals())
|
def test_local_scalar_dense_call(self):
with self.assertRaisesRegex(RuntimeError, "cannot be called on meta tensors"):
meta_tensor = torch.randn(1, device='meta')
meta_tensor.item()
|
import itertools
import torch
import os
import numpy as np
from enum import Enum
from torch.overrides import resolve_name
from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
from torch.utils import _pytree as pytree
from torch._subclasses.meta_utils import MetaConverter, assert_metadata_eq, is_sparse_any
import torch.utils._python_dispatch
from torch._dispatch.python import enable_python_dispatcher
from torch._ops import OpOverload, OpOverloadPacket
from torch.testing import make_tensor
from torch.testing._internal.common_utils import unMarkDynamoStrictTest
from torch.testing._internal.common_utils import (
TestCase,
skipIfCrossRef,
skipIfTorchDynamo,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_TORCHDYNAMO,
run_tests,
dtype_abbrs,
parametrize
)
from torch.testing._internal.common_device_type import (
ops,
instantiate_device_type_tests,
onlyCUDA,
onlyCPU,
OpDTypes,
)
from torch.testing._internal.common_methods_invocations import (
binary_ufuncs, op_db, foreach_unary_op_db, foreach_binary_op_db,
foreach_pointwise_op_db, foreach_reduce_op_db, foreach_other_op_db)
from torch.testing._internal.opinfo.core import S, SampleInput
from torchgen.yaml_utils import YamlLoader
from torchgen.model import OperatorName
import copy
import sys
import yaml
import atexit
import re
from collections import defaultdict
from collections.abc import Iterable
import unittest
import warnings
import weakref
from functools import partial, wraps
bf16 = torch.bfloat16
f64 = torch.float64
f32 = torch.float32
f16 = torch.float16
c32 = torch.complex32
c64 = torch.complex64
c128 = torch.complex128
i8 = torch.int8
i16 = torch.int16
i32 = torch.int32
i64 = torch.int64
b8 = torch.bool
u8 = torch.uint8
u16 = torch.uint16
u32 = torch.uint32
u64 = torch.uint64
foreach_op_db = (
foreach_unary_op_db +
foreach_binary_op_db +
foreach_pointwise_op_db +
foreach_reduce_op_db +
foreach_other_op_db
)
aten = torch.ops.aten
CHECK_STRIDES = {
torch.Tensor.__getitem__,
}
CHECK_ALL_STRIDES = {
aten.unsqueeze.default
}
CHECK_STRIDES_SKIPS = {
aten._conj_physical.default,
aten._fft_c2c.default,
aten._fft_c2r.default,
aten._fft_r2c.default,
aten._linalg_svd.default,
aten.binary_cross_entropy.default,
aten.complex.default,
aten.polar.default,
aten.copysign.Tensor,
aten.div.Tensor_mode,
aten.floor_divide.default,
aten.heaviside.default,
aten.lerp.Scalar,
aten.lerp.Tensor,
aten.logaddexp.default,
aten.logical_and.default,
aten.logical_or.default,
aten.logical_xor.default,
aten.pow.Scalar,
aten.prelu.default,
aten.special_xlog1py.default,
aten.xlogy.Tensor,
aten.nll_loss2d_forward.default,
# channel_last and channel_last_3d related failures
aten.convolution.default,
# following ops fails if include_storage_offset = True, but these are a bit edge casey
# we should still fix them, leaving them here for tracking.
# aten._reshape_alias.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_matmul_cuda_float32
# aten.view.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_unflatten_cuda_float32
}
CHECK_CONJ_SKIPS = {
# The conj bit is not copied, see:
# https://github.com/pytorch/pytorch/pull/101836
aten.linalg_lu_solve.out,
}
COLLECT_EXPECT = os.getenv('PYTORCH_COLLECT_EXPECT', '0') == '1'
seen_succeeded = {}
seen_failed = {}
failed_reasons = defaultdict(set)
TestExpect = Enum("TestExpect", ("SUCCESS", "XFAILURE", "SKIP"))
RE_NOT_IMPLEMENTED_MSG = re.compile(r"Could not run '([^']+)' with arguments ")
meta_function_expected_failures = {
torch.Tensor.to_sparse : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.allclose : {f64, f16, c128, c64, bf16, f32},
torch.argwhere : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.combinations : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.corrcoef : {f64, i32, c128, i64, i16, u8, c64, bf16, f16, i8, f32},
torch.cov : {f64, i32, c128, i64, i16, u8, c64, bf16, i8, f32, f16},
torch.functional.istft : {f64, c64, c128, f32},
torch.geqrf : {f64, c64, c128, f32},
torch.masked_select : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.item : {f64, i32, c128, i64, i16, f16, u8, c32, c64, bf16, b8, i8, f32},
torch.bincount : {i32, i64, u8, i16, i8},
torch.functional.unique : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
torch.functional.unique_consecutive : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
torch.histogram : {f64, f32},
torch.histogramdd : {f64, f32},
torch.nn.functional.ctc_loss : {f64, f32},
torch.nn.functional.gaussian_nll_loss : {f16, f64, bf16, f32},
torch.linalg.lstsq : {f64, f32, c128, c64},
}
meta_function_expected_failures_conditional = {
torch.repeat_interleave : (lambda dtype, *args, **kwargs: not isinstance(kwargs.get("repeats", None), int)),
}
meta_function_skips = {
torch.Tensor.__rmatmul__ : {bf16, c128, f64, f32, f16, c64},
torch.Tensor.matmul : {f64, f32, c128, c64},
torch.functional.atleast_2d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.atleast_3d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.cartesian_prod : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.einsum : {bf16, c128, f64, f32, f16, c64},
torch.inner : {f16, bf16, i8, i64, u8, c128, f64, i16, f32, i32, c64},
torch.linalg.matrix_norm : {c128, f32, c64, f64},
torch.linalg.matrix_rank : {c128, c64},
torch.linalg.svd : {c128, c64},
torch.matmul : {bf16, c128, f64, f32, f16, c64},
torch.nanquantile : {f64, f32},
torch.narrow : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c32, c64},
torch.nn.functional.batch_norm : {f64, f32},
torch.nn.functional.binary_cross_entropy : {bf16, f64, f32, f16},
torch.nn.functional.dropout3d : {bf16, f64, f32, f16},
torch.nn.functional.local_response_norm : {bf16, f64, f32, f16},
torch.svd : {c128, c64},
torch.take_along_dim : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.vstack : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.diff : {b8},
torch.equal : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.nanmean : {bf16, f64, f32, f16, c32, c64, c128},
torch.nn.functional.cross_entropy : {bf16, f64, f32},
torch.nn.functional.nll_loss : {bf16, f64, f32},
torch.linalg.cond : {c128, c64, f32, f64},
torch.linalg.vecdot : {bf16, f64, f32, f16},
torch.empty : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.Tensor.addbmm_: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
torch.nn.functional.one_hot : {i64},
}
meta_function_device_expected_failures = defaultdict(dict)
meta_function_device_expected_failures_only_outplace = defaultdict(dict)
meta_function_device_skips = defaultdict(dict)
meta_function_device_expected_failures['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
torch.native_batch_norm: {bf16, f16},
torch._native_batch_norm_legit: {bf16, f16},
torch.ops.aten._batch_norm_with_update: {bf16, f16},
torch.native_layer_norm: {bf16, f16},
}
meta_function_device_expected_failures['cuda'] = {
torch.corrcoef: {bf16, f16}, # aten::_local_scalar_dense
torch.cov: {f16}, # aten::_local_scalar_dense
torch.functional.unique: {f16}, # aten::_unique2, aten::unique_dim
torch.functional.unique_consecutive: {f16}, # aten::unique_consecutive
torch.geqrf: {f32, f64}, # aten::geqrf
}
meta_function_device_skips['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
torch.native_batch_norm: {f32, f64},
torch._native_batch_norm_legit: {f32, f64},
torch.ops.aten._batch_norm_with_update: {f32, f64},
}
meta_function_device_skips['cuda'] = {
torch.inner: {f16},
torch.linalg.matrix_rank: {f32, f64},
torch.linalg.svd: {f32, f64},
torch.nn.functional.cross_entropy: {f16},
torch.nn.functional.interpolate: {f16},
torch.nn.functional.nll_loss: {f16},
torch.svd: {f32, f64},
}
meta_dispatch_expected_failures = {
aten.allclose.default: {f16, bf16, f32, f64, c64, c128}, # NotImplementedError: 'aten::_local_scalar_dense'
aten.geqrf.default : {c64, c128, f64, f32},
aten.linalg_lstsq.default : {c64, c128, f64, f32},
aten.masked_select.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.masked_select.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.nonzero.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten.nonzero.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten._to_sparse.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._to_sparse.sparse_dim : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._ctc_loss.Tensor : {f32, f64}, # Shape of second output depends on data.
aten._histogramdd_bin_edges.default : {f32, f64},
aten._histogramdd_from_bin_cts.default : {f32, f64},
aten._histogramdd_from_bin_tensors.default : {f32, f64},
aten._local_scalar_dense.default : {c32, c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._unique2.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.bincount.default : {i64, i8, i32, i16, u8},
aten.equal.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.histogram.bin_ct : {f32, f64},
aten.histogram.bins_tensor : {f32, f64},
aten.unique_consecutive.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.unique_dim.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.upsample_nearest3d.vec : {bf16, f32, f64, u8},
}
meta_dispatch_skips = {
aten.index.Tensor: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128}, # at::nonzero doesn't have a Meta function
aten._to_copy.default: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128},
aten.empty.memory_format: {b8, bf16, c128, c64, c32, f16, f32, f64, i16, i32, i64, i8, u8},
aten.addbmm_.default: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
}
meta_dispatch_early_skips = set({
torch.Tensor.float_power_,
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumprod_,
torch.Tensor.cumsum_,
})
meta_inplace_skips = set({
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumprod_,
torch.Tensor.cumsum_,
})
meta_dispatch_device_expected_failures = defaultdict(dict)
meta_dispatch_device_skips = defaultdict(dict)
meta_dispatch_device_expected_failures['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
aten.native_batch_norm.default: {bf16, f16},
aten._native_batch_norm_legit.default: {bf16, f16},
aten._native_batch_norm_legit.no_stats: {bf16, f16},
aten._batch_norm_with_update.default: {bf16, f16},
aten.native_layer_norm.default: {bf16, f16},
}
meta_dispatch_device_expected_failures['cuda'] = {
aten._unique2.default: {f16}, # aten::_unique2
aten._use_cudnn_ctc_loss.default: {f32, f64}, # aten::_use_cudnn_ctc_loss
aten._use_cudnn_ctc_loss.Tensor: {f32, f64}, # aten::_use_cudnn_ctc_loss.Tensor
aten.cudnn_grid_sampler.default: {f16, f32, f64}, # aten::cudnn_grid_sampler
aten.geqrf.default: {f32, f64}, # aten::geqrf
aten.linalg_eigvalsh.out: {f32, f64}, # aten::linalg_eigvalsh.out
aten.log_sigmoid_forward.default: {bf16, f16, f64, f32},
aten.log_sigmoid_forward.output : {bf16, f16, f64, f32}, # aten::log_sigmoid_forward.output
aten.unique_consecutive.default: {f16}, # aten::unique_consecutive
aten.unique_dim.default: {f16}, # aten::unique_dim
aten.upsample_nearest3d.vec: {f16}, # aten::upsample_nearest3d.vec
}
meta_dispatch_device_skips['cpu'] = {
aten._embedding_bag_forward_only.default: {bf16, f16, f32, f64},
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
aten.native_batch_norm.default: {f32, f64},
aten._native_batch_norm_legit.default: {f32, f64},
aten._native_batch_norm_legit.no_stats: {f32, f64},
aten._batch_norm_with_update.default: {f32, f64},
# If the computation dtype is different from the input
# dtype this will fail. CPU execution may also have a
# a different output from other devices.
aten.native_batch_norm.out: {bf16, f16, f32, f64}
}
meta_dispatch_device_skips['cuda'] = {
aten._conj.default: {c32, f16}, # file issue
aten._linalg_svd.default: {c64, c128}, # aten::linalg_eigvalsh.out
aten.cudnn_batch_norm.default: {f32, f64},
aten.log_softmax.int : {c32, c64},
aten.softmax.int : {c32, c64},
aten.softmax.int : {c32, c64},
# ROCm stuff; technically this should be expected failure but it's
# not worth it; these should get unified anyway
aten.miopen_batch_norm.default: {f32},
}
@unMarkDynamoStrictTest
class TestMeta(TestCase):
from torch.testing._internal.common_methods_invocations import sample_inputs_layer_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_group_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm
from torch.ao.quantization import FusedMovingAvgObsFakeQuantize
import io
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/test_meta.py
|
print_op_str_if_not_supported
|
def print_op_str_if_not_supported(op_str):
op = OperatorName.parse(op_str)
packet = getattr(torch.ops.aten, str(op.name))
overload = getattr(packet, op.overload_name if op.overload_name else "default")
if any(overload in d for d in [meta_dispatch_skips, meta_dispatch_device_skips['cuda']]):
print(f"{overload} # SKIP")
if any(overload in d for d in [meta_dispatch_expected_failures, meta_dispatch_device_expected_failures['cuda']]):
print(overload)
if __name__ == "__main__":
COMPARE_XLA = os.getenv('PYTORCH_COMPARE_XLA', None)
if COMPARE_XLA is not None:
with open(COMPARE_XLA, "r") as f:
d = yaml.load(f, Loader=YamlLoader)
ops = d.get("full_codegen", []) + d.get("supported", []) + d.get("autograd", [])
for op_str in ops:
print_op_str_if_not_supported(op_str)
sys.exit(0)
COMPARE_TEXT = os.getenv('PYTORCH_COMPARE_TEXT', None)
if COMPARE_TEXT is not None:
with open(COMPARE_TEXT, "r") as f:
for op_str in f:
print_op_str_if_not_supported(op_str.strip())
sys.exit(0)
run_tests()
|
def print_op_str_if_not_supported(op_str):
op = OperatorName.parse(op_str)
packet = getattr(torch.ops.aten, str(op.name))
overload = getattr(packet, op.overload_name if op.overload_name else "default")
if any(overload in d for d in [meta_dispatch_skips, meta_dispatch_device_skips['cuda']]):
print(f"{overload} # SKIP")
if any(overload in d for d in [meta_dispatch_expected_failures, meta_dispatch_device_expected_failures['cuda']]):
print(overload)
if __name__ == "__main__":
COMPARE_XLA = os.getenv('PYTORCH_COMPARE_XLA', None)
if COMPARE_XLA is not None:
with open(COMPARE_XLA) as f:
d = yaml.load(f, Loader=YamlLoader)
ops = d.get("full_codegen", []) + d.get("supported", []) + d.get("autograd", [])
for op_str in ops:
print_op_str_if_not_supported(op_str)
sys.exit(0)
COMPARE_TEXT = os.getenv('PYTORCH_COMPARE_TEXT', None)
if COMPARE_TEXT is not None:
with open(COMPARE_TEXT) as f:
for op_str in f:
print_op_str_if_not_supported(op_str.strip())
sys.exit(0)
run_tests()
|
import itertools
import torch
import os
from enum import Enum
from torch.overrides import resolve_name
from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
from torch._subclasses.meta_utils import MetaConverter, assert_metadata_eq
import torch.utils._python_dispatch
from torch._dispatch.python import enable_python_dispatcher
from torch.testing._internal.common_utils import (
TestCase,
skipIfCrossRef,
skipIfTorchDynamo,
suppress_warnings,
TEST_WITH_ASAN,
run_tests,
dtype_abbrs
)
from torch.testing._internal.common_device_type import (
ops,
instantiate_device_type_tests,
onlyCUDA,
onlyCPU,
OpDTypes,
)
from torch.testing._internal.common_methods_invocations import op_db
from torchgen.utils import YamlLoader
from torchgen.model import OperatorName
import sys
import yaml
import atexit
import re
from collections import defaultdict
import unittest
import warnings
import weakref
from functools import wraps
bf16 = torch.bfloat16
f64 = torch.float64
f32 = torch.float32
f16 = torch.float16
c32 = torch.complex32
c64 = torch.complex64
c128 = torch.complex128
i8 = torch.int8
i16 = torch.int16
i32 = torch.int32
i64 = torch.int64
b8 = torch.bool
u8 = torch.uint8
aten = torch.ops.aten
CHECK_STRIDES = {
torch.Tensor.__getitem__,
}
CHECK_ALL_STRIDES = {
aten.unsqueeze.default
}
CHECK_STRIDES_SKIPS = {
aten._conj_physical.default,
aten._fft_c2c.default,
aten._fft_c2r.default,
aten._fft_r2c.default,
aten._linalg_svd.default,
aten.binary_cross_entropy.default,
aten.complex.default,
aten.copysign.Tensor,
aten.div.Tensor_mode,
aten.floor_divide.default,
aten.heaviside.default,
aten.lerp.Scalar,
aten.lerp.Tensor,
aten.logaddexp.default,
aten.logical_and.default,
aten.logical_or.default,
aten.logical_xor.default,
aten.pow.Scalar,
aten.prelu.default,
aten.special_xlog1py.default,
aten.xlogy.Tensor,
# channel_last and channel_last_3d related failures
aten.convolution.default,
# following ops fails if include_storage_offset = True, but these are a bit edge casey
# we should still fix them, leaving them here for tracking.
# aten._reshape_alias.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_matmul_cuda_float32
# aten.view.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_unflatten_cuda_float32
}
COLLECT_EXPECT = os.getenv('PYTORCH_COLLECT_EXPECT', '0') == '1'
seen_succeeded = {}
seen_failed = {}
failed_reasons = defaultdict(set)
TestExpect = Enum("TestExpect", ("SUCCESS", "XFAILURE", "SKIP"))
RE_NOT_IMPLEMENTED_MSG = re.compile(r"Could not run '([^']+)' with arguments ")
meta_function_expected_failures = {
torch.Tensor.to_sparse : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.allclose : {f64, f16, c128, c64, bf16, f32},
torch.argwhere : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.combinations : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.corrcoef : {f64, i32, c128, i64, i16, u8, c64, bf16, i8, f32},
torch.count_nonzero : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.cov : {f64, i32, c128, i64, i16, u8, c64, bf16, i8, f32},
torch.functional.istft : {f64, c64, c128, f32},
torch.geqrf : {f64, c64, c128, f32},
torch.linalg.householder_product : {f64, c64, c128, f32},
torch.linalg.solve_triangular : {f64, c64, c128, f32},
torch.masked_select : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.matrix_exp : {f64, c128, c64, bf16, f32},
torch.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.ormqr : {f64, c64, c128, f32},
torch.repeat_interleave : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.take : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.item : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.bincount : {i32, i64, u8, i16, i8},
torch.frexp : {f64, f16, bf16, f32},
torch.functional.unique : {f64, i32, i64, u8, i16, bf16, b8, i8, f32},
torch.functional.unique_consecutive : {f64, i32, i64, u8, i16, bf16, b8, i8, f32},
torch.histc : {f64, bf16, f32},
torch.histogram : {f64, f32},
torch.histogramdd : {f64, f32},
torch.kthvalue : {f64, i32, i64, u8, i16, bf16, i8, f32},
torch.logcumsumexp : {f64, bf16, f32, c64, c128},
torch.median : {f64, i32, i64, u8, i16, bf16, i8, f32},
torch.mode : {f64, i32, i64, f16, u8, i16, bf16, b8, i8, f32},
torch.multinomial : {f64, bf16, f32},
torch.nn.functional.ctc_loss : {f64, f32},
torch.nn.functional.gaussian_nll_loss : {f64, bf16, f32},
torch.nn.functional.max_pool3d : {f64, f32},
torch.nn.functional.max_pool3d_with_indices : {f64, f32},
torch.nn.functional.max_unpool1d : {f64, f32},
torch.nn.functional.max_unpool2d : {f64, f32},
torch.nn.functional.max_unpool3d : {f64, f32},
torch.nn.functional.multi_margin_loss : {f64, f32},
torch.nn.functional.multilabel_margin_loss : {f64, f32},
torch.nn.functional.one_hot : {i64},
torch.nn.functional.pdist : {f64, f32},
torch.polar : {f64, f32},
torch._segment_reduce : {f64, f16, bf16, f32},
torch.searchsorted : {f64, i32, i64, f16, u8, i16, bf16, i8, f32},
torch.cholesky : {f64, f32, c128, c64},
torch.cholesky_inverse : {f64, f32, c128, c64},
torch.cholesky_solve : {f64, f32, c128, c64},
torch.linalg.eig : {f64, f32, c128, c64},
torch.linalg.eigvals : {f64, f32, c128, c64},
torch.linalg.lstsq : {f64, f32, c128, c64},
}
meta_function_expected_failures_only_outplace = {
torch.nn.functional.rrelu : {f64, bf16, f32},
}
meta_function_skips = {
torch.Tensor.__rmatmul__ : {bf16, c128, f64, f32, f16, c64},
torch.Tensor.matmul : {f64, f32, c128, c64},
torch.functional.atleast_2d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.atleast_3d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.cartesian_prod : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.einsum : {bf16, c128, f64, f32, f16, c64},
torch.functional.tensordot : {bf16, i8, i64, u8, c128, f64, i16, f32, i32, c64},
torch.inner : {bf16, i8, i64, u8, c128, f64, i16, f32, i32, c64},
torch.linalg.lu_solve : {c128, c64},
torch.linalg.matrix_norm : {c128, f32, c64, f64},
torch.linalg.matrix_power : {c128, c64},
torch.linalg.matrix_rank : {c128, c64},
torch.linalg.svd : {c128, c64},
torch.matmul : {bf16, c128, f64, f32, f16, c64},
torch.nanquantile : {f64, f32},
torch.narrow : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c32, c64},
torch.nn.functional.batch_norm : {f64, f32},
torch.nn.functional.binary_cross_entropy : {bf16, f64, f32, f16},
torch.nn.functional.dropout3d : {bf16, f64, f32, f16},
torch.nn.functional.local_response_norm : {bf16, f64, f32, f16},
torch.svd : {c128, c64},
torch.take_along_dim : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.vstack : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.aminmax : {i8, i64, u8, f64, b8, f32, i32, i16},
torch.cummax : {bf16, i8, i64, u8, f64, b8, f32, i32, i16},
torch.cummin : {bf16, i8, i64, u8, f64, b8, f32, i32, i16},
torch.diff : {b8},
torch.equal : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.cdist : {f64, f32},
torch.nanmean : {bf16, f64, f32, f16},
torch.nn.functional.cross_entropy : {bf16, f64, f32},
torch.nn.functional.interpolate : {bf16, f64, f32, u8},
torch.nn.functional.nll_loss : {bf16, f64, f32},
torch.linalg.pinv : {f64, f32},
torch.linalg.cond : {c128, c64, f32, f64},
torch.linalg.vander: {c128, c64, f32, f64, i16, i32, i64, i8, u8},
torch.linalg.vecdot : {bf16, f64, f32, f16},
torch.empty : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
# This fails for arguments dispatched to grid_sampler_3d, but succeeds
# for grid_sampler_2d, so we can't just xfail it
torch.nn.functional.grid_sample : {f64, f32},
torch.Tensor.addbmm_: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
}
meta_function_device_expected_failures = defaultdict(dict)
meta_function_device_expected_failures_only_outplace = defaultdict(dict)
meta_function_device_skips = defaultdict(dict)
meta_function_device_expected_failures['cpu'] = {
torch.native_batch_norm: {bf16},
torch._native_batch_norm_legit: {bf16},
torch.native_layer_norm: {bf16},
}
meta_function_device_expected_failures['cuda'] = {
torch.corrcoef: {bf16, f16}, # aten::_local_scalar_dense
torch.cov: {f16}, # aten::_local_scalar_dense
torch.functional.unique: {f16}, # aten::_unique2, aten::unique_dim
torch.functional.unique_consecutive: {f16}, # aten::unique_consecutive
torch.geqrf: {f32, f64}, # aten::geqrf
torch.histc: {i16, i32, i64, i8}, # aten::histc, aten::histc.out
torch.kthvalue: {f16}, # aten::kthvalue.values
torch.linalg.householder_product: {f32, f64}, # aten::linalg_householder_product, aten::linalg_householder_product.out
torch.linalg.solve_triangular: {f32, f64}, # aten::linalg_solve_triangular, aten::linalg_solve_triangular.out
torch.logcumsumexp: {bf16, f16}, # aten::_logcumsumexp, aten::_logcumsumexp.out
torch.matrix_exp: {f16}, # aten::linalg_matrix_exp
torch.median: {f16}, # aten::median, aten::median.dim_values
torch.multinomial: {f16}, # aten::multinomial, aten::multinomial.out
torch.nn.functional.gaussian_nll_loss: {f16}, # aten::_local_scalar_dense
torch.nn.functional.max_pool3d: {bf16, f16}, # aten::max_pool3d_with_indices
torch.nn.functional.max_pool3d_with_indices: {bf16, f16}, # aten::max_pool3d_with_indices
torch.nn.functional.max_unpool1d: {f16}, # aten::max_unpool2d
torch.nn.functional.max_unpool2d: {f16}, # aten::max_unpool2d
torch.nn.functional.max_unpool3d: {f16}, # aten::max_unpool3d
torch.nn.functional.multi_margin_loss: {bf16, f16}, # aten::multi_margin_loss
torch.nn.functional.multilabel_margin_loss: {bf16, f16}, # aten::multilabel_margin_loss_forward
torch.ormqr: {f32, f64}, # aten::ormqr, aten::ormqr.out
}
meta_function_device_expected_failures_only_outplace['cuda'] = {
torch.nn.functional.rrelu: {f16}, # aten::rrelu_with_noise
}
meta_function_device_skips['cpu'] = {
torch.native_batch_norm: {f32, f64},
torch._native_batch_norm_legit: {f32, f64},
}
meta_function_device_skips['cuda'] = {
torch.cummax: {f16},
torch.cummin: {f16},
torch.functional.tensordot: {f16},
torch.inner: {f16},
torch.linalg.matrix_power: {f32, f64},
torch.linalg.matrix_rank: {f32, f64},
torch.linalg.svd: {f32, f64},
torch.nn.functional.cross_entropy: {f16},
torch.nn.functional.interpolate: {f16},
torch.nn.functional.nll_loss: {f16},
torch.svd: {f32, f64},
# This fails for arguments dispatched to grid_sampler_3d, but succeeds
# for grid_sampler_2d, so we can't just xfail it
torch.nn.functional.grid_sample : {f16},
}
meta_dispatch_expected_failures = {
aten.allclose.default: {f16, bf16, f32, f64, c64, c128}, # NotImplementedError: 'aten::_local_scalar_dense'
aten.cholesky.default : {c64, c128, f64, f32},
aten.cholesky.out : {c64, c128, f64, f32},
aten.cholesky_inverse.default : {c64, c128, f64, f32},
aten.cholesky_inverse.out : {c64, c128, f64, f32},
aten.cholesky_solve.default : {c64, c128, f64, f32},
aten.cholesky_solve.out : {c64, c128, f64, f32},
aten.count_nonzero.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.count_nonzero.dim_IntList : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.geqrf.default : {c64, c128, f64, f32},
aten.linalg_eig.default : {c64, c128, f64, f32},
aten.linalg_householder_product.default : {c64, c128, f64, f32},
aten.linalg_householder_product.out : {c64, c128, f64, f32},
aten.linalg_lstsq.default : {c64, c128, f64, f32},
aten.linalg_matrix_exp.default : {c64, bf16, f32, f64, c128},
aten.linalg_solve_triangular.default : {c64, c128, f64, f32},
aten.linalg_solve_triangular.out : {c64, c128, f64, f32},
aten.masked_select.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.masked_select.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.nonzero.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten.nonzero.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten.ormqr.default : {c64, c128, f64, f32},
aten.ormqr.out : {c64, c128, f64, f32},
aten.polar.out : {f32, f64},
aten.take.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.take.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.tensordot.out : {c64, i8, f64, c128, i64, bf16, f32, i32, i16, u8},
aten.to_sparse.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.to_sparse.sparse_dim : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._ctc_loss.default : {f32, f64}, # Shape of second output depends on data.
aten._ctc_loss.Tensor : {f32, f64}, # Shape of second output depends on data.
aten._histogramdd_bin_edges.default : {f32, f64},
aten._histogramdd_from_bin_cts.default : {f32, f64},
aten._histogramdd_from_bin_tensors.default : {f32, f64},
aten._local_scalar_dense.default : {c32, c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._pdist_forward.default : {f32, f64},
aten._unique2.default : {i8, f64, i64, bf16, f32, i32, b8, i16, u8},
aten.bincount.default : {i64, i8, i32, i16, u8},
aten.equal.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.frexp.Tensor : {bf16, f32, f16, f64},
aten.grid_sampler_3d.default : {f32, f64},
aten.histc.default : {bf16, f32, f64},
aten.histc.out : {bf16, f32, f64},
aten.histogram.bin_ct : {f32, f64},
aten.histogram.bins_tensor : {f32, f64},
aten.kthvalue.default : {i8, f64, i64, bf16, f32, i32, i16, u8},
aten.logcumsumexp.default : {bf16, f32, f64, c64, c128},
aten.logcumsumexp.out : {bf16, f32, f64, c64, c128},
aten.max_pool3d_with_indices.default : {f32, f64},
aten.max_unpool2d.default : {f32, f64},
aten.max_unpool3d.default : {f32, f64},
aten.median.default : {i8, f64, i64, bf16, f32, i32, i16, u8},
aten.median.dim : {i8, f64, i64, bf16, f32, i32, i16, u8},
aten.mode.default : {f16, i8, f64, i64, bf16, f32, i32, b8, i16, u8},
aten.multi_margin_loss.default : {f32, f64},
aten.multilabel_margin_loss_forward.default : {f32, f64},
aten.multinomial.default : {bf16, f32, f64},
aten.multinomial.out : {bf16, f32, f64},
aten.nll_loss2d_forward.default : {bf16, f32, f64},
aten.polar.default : {f32, f64},
aten.rrelu_with_noise.default : {bf16, f32, f64},
aten.searchsorted.Tensor : {f16, i8, f64, i64, bf16, f32, i32, i16, u8},
aten.searchsorted.Tensor_out : {f16, i8, f64, i64, bf16, f32, i32, i16, u8},
aten.segment_reduce.default : {bf16, f32, f16, f64},
aten.unique_consecutive.default : {i8, f64, i64, bf16, f32, i32, b8, i16, u8},
aten.unique_dim.default : {i8, f64, i64, bf16, f32, i32, b8, i16, u8},
aten.upsample_nearest3d.vec : {bf16, f32, f64, u8},
}
meta_dispatch_skips = {
aten.index.Tensor: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128}, # at::nonzero doesn't have a Meta function
aten._to_copy.default: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128},
aten.aminmax.default: {i64, u8, b8, f32, i8, f64, i16, i32},
aten.cummax.default: {i64, bf16, u8, b8, f32, i8, f64, i16, i32},
aten.cummin.default: {i64, bf16, u8, b8, f32, i8, f64, i16, i32},
aten.linalg_lu_solve.default: {c32, c64, c128},
aten.linalg_lu_solve.out: {c32, c64, c128},
aten.linalg_pinv.atol_rtol_tensor: {f32, f64},
aten.linalg_pinv.atol_rtol_tensor_out: {f32, f64},
aten.empty.memory_format: {b8, bf16, c128, c64, c32, f16, f32, f64, i16, i32, i64, i8, u8},
aten.addbmm_.default: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
}
meta_dispatch_early_skips = set({
torch.Tensor.float_power_,
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumsum_,
})
meta_inplace_skips = set({
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumsum_,
})
meta_dispatch_device_expected_failures = defaultdict(dict)
meta_dispatch_device_skips = defaultdict(dict)
meta_dispatch_device_expected_failures['cpu'] = {
aten.native_batch_norm.default: {bf16},
aten._native_batch_norm_legit.default: {bf16},
aten._native_batch_norm_legit.no_stats: {bf16},
aten.native_layer_norm.default: {bf16},
}
meta_dispatch_device_expected_failures['cuda'] = {
aten._unique2.default: {f16}, # aten::_unique2
aten._use_cudnn_ctc_loss.default: {f32, f64}, # aten::_use_cudnn_ctc_loss
aten._use_cudnn_ctc_loss.Tensor: {f32, f64}, # aten::_use_cudnn_ctc_loss.Tensor
aten.cudnn_grid_sampler.default: {f16, f32, f64}, # aten::cudnn_grid_sampler
aten.geqrf.default: {f32, f64}, # aten::geqrf
aten.grid_sampler_3d.default: {f16}, # aten::grid_sampler_3d
aten.histc.default: {i16, i32, i64, i8}, # aten::histc
aten.histc.out: {i16, i32, i64, i8}, # aten::histc.out
aten.kthvalue.default: {f16}, # aten::kthvalue.values
aten.linalg_eigvalsh.out: {f32, f64}, # aten::linalg_eigvalsh.out
aten.linalg_householder_product.default: {f32, f64}, # aten::linalg_householder_product
aten.linalg_householder_product.out: {f32, f64}, # aten::linalg_householder_product.out
aten.linalg_matrix_exp.default: {f16}, # aten::linalg_matrix_exp
aten.linalg_solve_triangular.default: {f32, f64}, # aten::linalg_solve_triangular
aten.linalg_solve_triangular.out: {f32, f64}, # aten::linalg_solve_triangular.out
aten.log_sigmoid_forward.default: {bf16, f16, f64, f32},
aten.log_sigmoid_forward.output : {bf16, f16, f64, f32}, # aten::log_sigmoid_forward.output
aten.logcumsumexp.default: {bf16, f16}, # aten::_logcumsumexp
aten.logcumsumexp.out: {bf16, f16}, # aten::_logcumsumexp.out
aten.max_pool3d_with_indices.default: {bf16, f16}, # aten::max_pool3d_with_indices
aten.max_unpool2d.default: {f16}, # aten::max_unpool2d
aten.max_unpool3d.default: {f16}, # aten::max_unpool3d
aten.median.default: {f16}, # aten::median
aten.median.dim: {f16}, # aten::median.dim_values
aten.multi_margin_loss.default: {bf16, f16}, # aten::multi_margin_loss
aten.multilabel_margin_loss_forward.default: {bf16, f16}, # aten::multilabel_margin_loss_forward
aten.multinomial.default: {f16}, # aten::multinomial
aten.multinomial.out: {f16}, # aten::multinomial.out
aten.nll_loss2d_forward.default: {f16}, # aten::nll_loss2d_forward
aten.ormqr.default: {f32, f64}, # aten::ormqr
aten.ormqr.out: {f32, f64}, # aten::ormqr.out
aten.rrelu_with_noise.default: {f16}, # aten::rrelu_with_noise
aten.tensordot.out: {f16}, # aten::tensordot.out
aten.unique_consecutive.default: {f16}, # aten::unique_consecutive
aten.unique_dim.default: {f16}, # aten::unique_dim
aten.upsample_nearest3d.vec: {f16}, # aten::upsample_nearest3d.vec
}
meta_dispatch_device_skips['cpu'] = {
aten._embedding_bag_forward_only.default: {bf16, f16, f32, f64},
aten.native_batch_norm.default: {f32, f64},
aten._native_batch_norm_legit.default: {f32, f64},
aten._native_batch_norm_legit.no_stats: {f32, f64},
}
meta_dispatch_device_skips['cuda'] = {
aten._conj.default: {c32, f16}, # file issue
aten._linalg_svd.default: {c64, c128}, # aten::linalg_eigvalsh.out
aten.cudnn_batch_norm.default: {f32, f64},
aten.log_softmax.int : {c32, c64},
aten.softmax.int : {c32, c64},
aten.softmax.int : {c32, c64},
aten.cummax.default: {f16},
aten.cummin.default: {f16},
# ROCm stuff; technically this should be expected failure but it's
# not worth it; these should get unified anyway
aten.miopen_batch_norm.default: {f32},
}
from torch.ao.quantization import FusedMovingAvgObsFakeQuantize
import io
|
import itertools
import torch
import os
import numpy as np
from enum import Enum
from torch.overrides import resolve_name
from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
from torch.utils import _pytree as pytree
from torch._subclasses.meta_utils import MetaConverter, assert_metadata_eq, is_sparse_any
import torch.utils._python_dispatch
from torch._dispatch.python import enable_python_dispatcher
from torch._ops import OpOverload, OpOverloadPacket
from torch.testing import make_tensor
from torch.testing._internal.common_utils import unMarkDynamoStrictTest
from torch.testing._internal.common_utils import (
TestCase,
skipIfCrossRef,
skipIfTorchDynamo,
suppress_warnings,
TEST_WITH_ASAN,
TEST_WITH_TORCHDYNAMO,
run_tests,
dtype_abbrs,
parametrize
)
from torch.testing._internal.common_device_type import (
ops,
instantiate_device_type_tests,
onlyCUDA,
onlyCPU,
OpDTypes,
)
from torch.testing._internal.common_methods_invocations import (
binary_ufuncs, op_db, foreach_unary_op_db, foreach_binary_op_db,
foreach_pointwise_op_db, foreach_reduce_op_db, foreach_other_op_db)
from torch.testing._internal.opinfo.core import S, SampleInput
from torchgen.yaml_utils import YamlLoader
from torchgen.model import OperatorName
import copy
import sys
import yaml
import atexit
import re
from collections import defaultdict
from collections.abc import Iterable
import unittest
import warnings
import weakref
from functools import partial, wraps
bf16 = torch.bfloat16
f64 = torch.float64
f32 = torch.float32
f16 = torch.float16
c32 = torch.complex32
c64 = torch.complex64
c128 = torch.complex128
i8 = torch.int8
i16 = torch.int16
i32 = torch.int32
i64 = torch.int64
b8 = torch.bool
u8 = torch.uint8
u16 = torch.uint16
u32 = torch.uint32
u64 = torch.uint64
foreach_op_db = (
foreach_unary_op_db +
foreach_binary_op_db +
foreach_pointwise_op_db +
foreach_reduce_op_db +
foreach_other_op_db
)
aten = torch.ops.aten
CHECK_STRIDES = {
torch.Tensor.__getitem__,
}
CHECK_ALL_STRIDES = {
aten.unsqueeze.default
}
CHECK_STRIDES_SKIPS = {
aten._conj_physical.default,
aten._fft_c2c.default,
aten._fft_c2r.default,
aten._fft_r2c.default,
aten._linalg_svd.default,
aten.binary_cross_entropy.default,
aten.complex.default,
aten.polar.default,
aten.copysign.Tensor,
aten.div.Tensor_mode,
aten.floor_divide.default,
aten.heaviside.default,
aten.lerp.Scalar,
aten.lerp.Tensor,
aten.logaddexp.default,
aten.logical_and.default,
aten.logical_or.default,
aten.logical_xor.default,
aten.pow.Scalar,
aten.prelu.default,
aten.special_xlog1py.default,
aten.xlogy.Tensor,
aten.nll_loss2d_forward.default,
# channel_last and channel_last_3d related failures
aten.convolution.default,
# following ops fails if include_storage_offset = True, but these are a bit edge casey
# we should still fix them, leaving them here for tracking.
# aten._reshape_alias.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_matmul_cuda_float32
# aten.view.default, # repro with test_dispatch_symbolic_meta_outplace_all_strides_unflatten_cuda_float32
}
CHECK_CONJ_SKIPS = {
# The conj bit is not copied, see:
# https://github.com/pytorch/pytorch/pull/101836
aten.linalg_lu_solve.out,
}
COLLECT_EXPECT = os.getenv('PYTORCH_COLLECT_EXPECT', '0') == '1'
seen_succeeded = {}
seen_failed = {}
failed_reasons = defaultdict(set)
TestExpect = Enum("TestExpect", ("SUCCESS", "XFAILURE", "SKIP"))
RE_NOT_IMPLEMENTED_MSG = re.compile(r"Could not run '([^']+)' with arguments ")
meta_function_expected_failures = {
torch.Tensor.to_sparse : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.allclose : {f64, f16, c128, c64, bf16, f32},
torch.argwhere : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.combinations : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.corrcoef : {f64, i32, c128, i64, i16, u8, c64, bf16, f16, i8, f32},
torch.cov : {f64, i32, c128, i64, i16, u8, c64, bf16, i8, f32, f16},
torch.functional.istft : {f64, c64, c128, f32},
torch.geqrf : {f64, c64, c128, f32},
torch.masked_select : {f64, i32, c128, i64, i16, f16, u8, c64, bf16, b8, i8, f32},
torch.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.item : {f64, i32, c128, i64, i16, f16, u8, c32, c64, bf16, b8, i8, f32},
torch.bincount : {i32, i64, u8, i16, i8},
torch.functional.unique : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
torch.functional.unique_consecutive : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
torch.histogram : {f64, f32},
torch.histogramdd : {f64, f32},
torch.nn.functional.ctc_loss : {f64, f32},
torch.nn.functional.gaussian_nll_loss : {f16, f64, bf16, f32},
torch.linalg.lstsq : {f64, f32, c128, c64},
}
meta_function_expected_failures_conditional = {
torch.repeat_interleave : (lambda dtype, *args, **kwargs: not isinstance(kwargs.get("repeats", None), int)),
}
meta_function_skips = {
torch.Tensor.__rmatmul__ : {bf16, c128, f64, f32, f16, c64},
torch.Tensor.matmul : {f64, f32, c128, c64},
torch.functional.atleast_2d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.atleast_3d : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.cartesian_prod : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.functional.einsum : {bf16, c128, f64, f32, f16, c64},
torch.inner : {f16, bf16, i8, i64, u8, c128, f64, i16, f32, i32, c64},
torch.linalg.matrix_norm : {c128, f32, c64, f64},
torch.linalg.matrix_rank : {c128, c64},
torch.linalg.svd : {c128, c64},
torch.matmul : {bf16, c128, f64, f32, f16, c64},
torch.nanquantile : {f64, f32},
torch.narrow : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c32, c64},
torch.nn.functional.batch_norm : {f64, f32},
torch.nn.functional.binary_cross_entropy : {bf16, f64, f32, f16},
torch.nn.functional.dropout3d : {bf16, f64, f32, f16},
torch.nn.functional.local_response_norm : {bf16, f64, f32, f16},
torch.svd : {c128, c64},
torch.take_along_dim : {bf16, i8, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.vstack : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.diff : {b8},
torch.equal : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.nanmean : {bf16, f64, f32, f16, c32, c64, c128},
torch.nn.functional.cross_entropy : {bf16, f64, f32},
torch.nn.functional.nll_loss : {bf16, f64, f32},
torch.linalg.cond : {c128, c64, f32, f64},
torch.linalg.vecdot : {bf16, f64, f32, f16},
torch.empty : {bf16, i8, c32, i64, u8, c128, b8, f64, i16, i32, f32, f16, c64},
torch.Tensor.addbmm_: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
torch.nn.functional.one_hot : {i64},
}
meta_function_device_expected_failures = defaultdict(dict)
meta_function_device_expected_failures_only_outplace = defaultdict(dict)
meta_function_device_skips = defaultdict(dict)
meta_function_device_expected_failures['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
torch.native_batch_norm: {bf16, f16},
torch._native_batch_norm_legit: {bf16, f16},
torch.ops.aten._batch_norm_with_update: {bf16, f16},
torch.native_layer_norm: {bf16, f16},
}
meta_function_device_expected_failures['cuda'] = {
torch.corrcoef: {bf16, f16}, # aten::_local_scalar_dense
torch.cov: {f16}, # aten::_local_scalar_dense
torch.functional.unique: {f16}, # aten::_unique2, aten::unique_dim
torch.functional.unique_consecutive: {f16}, # aten::unique_consecutive
torch.geqrf: {f32, f64}, # aten::geqrf
}
meta_function_device_skips['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
torch.native_batch_norm: {f32, f64},
torch._native_batch_norm_legit: {f32, f64},
torch.ops.aten._batch_norm_with_update: {f32, f64},
}
meta_function_device_skips['cuda'] = {
torch.inner: {f16},
torch.linalg.matrix_rank: {f32, f64},
torch.linalg.svd: {f32, f64},
torch.nn.functional.cross_entropy: {f16},
torch.nn.functional.interpolate: {f16},
torch.nn.functional.nll_loss: {f16},
torch.svd: {f32, f64},
}
meta_dispatch_expected_failures = {
aten.allclose.default: {f16, bf16, f32, f64, c64, c128}, # NotImplementedError: 'aten::_local_scalar_dense'
aten.geqrf.default : {c64, c128, f64, f32},
aten.linalg_lstsq.default : {c64, c128, f64, f32},
aten.masked_select.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.masked_select.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.nonzero.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten.nonzero.out : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, c32, b8, i16, u8},
aten._to_sparse.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._to_sparse.sparse_dim : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._ctc_loss.Tensor : {f32, f64}, # Shape of second output depends on data.
aten._histogramdd_bin_edges.default : {f32, f64},
aten._histogramdd_from_bin_cts.default : {f32, f64},
aten._histogramdd_from_bin_tensors.default : {f32, f64},
aten._local_scalar_dense.default : {c32, c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten._unique2.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.bincount.default : {i64, i8, i32, i16, u8},
aten.equal.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.histogram.bin_ct : {f32, f64},
aten.histogram.bins_tensor : {f32, f64},
aten.unique_consecutive.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.unique_dim.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.upsample_nearest3d.vec : {bf16, f32, f64, u8},
}
meta_dispatch_skips = {
aten.index.Tensor: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128}, # at::nonzero doesn't have a Meta function
aten._to_copy.default: {i64, bf16, f16, u8, b8, f32, i8, f64, i16, i32, c32, c64, c128},
aten.empty.memory_format: {b8, bf16, c128, c64, c32, f16, f32, f64, i16, i32, i64, i8, u8},
aten.addbmm_.default: {bf16, c128, c64, f32, f64, i16, i32, i64, i8, u8},
}
meta_dispatch_early_skips = set({
torch.Tensor.float_power_,
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumprod_,
torch.Tensor.cumsum_,
})
meta_inplace_skips = set({
# Errors out in one of the tests, while ProxyTensor passes...
torch.Tensor.cumprod_,
torch.Tensor.cumsum_,
})
meta_dispatch_device_expected_failures = defaultdict(dict)
meta_dispatch_device_skips = defaultdict(dict)
meta_dispatch_device_expected_failures['cpu'] = {
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
aten.native_batch_norm.default: {bf16, f16},
aten._native_batch_norm_legit.default: {bf16, f16},
aten._native_batch_norm_legit.no_stats: {bf16, f16},
aten._batch_norm_with_update.default: {bf16, f16},
aten.native_layer_norm.default: {bf16, f16},
}
meta_dispatch_device_expected_failures['cuda'] = {
aten._unique2.default: {f16}, # aten::_unique2
aten._use_cudnn_ctc_loss.default: {f32, f64}, # aten::_use_cudnn_ctc_loss
aten._use_cudnn_ctc_loss.Tensor: {f32, f64}, # aten::_use_cudnn_ctc_loss.Tensor
aten.cudnn_grid_sampler.default: {f16, f32, f64}, # aten::cudnn_grid_sampler
aten.geqrf.default: {f32, f64}, # aten::geqrf
aten.linalg_eigvalsh.out: {f32, f64}, # aten::linalg_eigvalsh.out
aten.log_sigmoid_forward.default: {bf16, f16, f64, f32},
aten.log_sigmoid_forward.output : {bf16, f16, f64, f32}, # aten::log_sigmoid_forward.output
aten.unique_consecutive.default: {f16}, # aten::unique_consecutive
aten.unique_dim.default: {f16}, # aten::unique_dim
aten.upsample_nearest3d.vec: {f16}, # aten::upsample_nearest3d.vec
}
meta_dispatch_device_skips['cpu'] = {
aten._embedding_bag_forward_only.default: {bf16, f16, f32, f64},
# TODO: The decomps for these batch norm ops return different dtypes depending
# on the device. We should make this work better with meta tensors.
aten.native_batch_norm.default: {f32, f64},
aten._native_batch_norm_legit.default: {f32, f64},
aten._native_batch_norm_legit.no_stats: {f32, f64},
aten._batch_norm_with_update.default: {f32, f64},
# If the computation dtype is different from the input
# dtype this will fail. CPU execution may also have a
# a different output from other devices.
aten.native_batch_norm.out: {bf16, f16, f32, f64}
}
meta_dispatch_device_skips['cuda'] = {
aten._conj.default: {c32, f16}, # file issue
aten._linalg_svd.default: {c64, c128}, # aten::linalg_eigvalsh.out
aten.cudnn_batch_norm.default: {f32, f64},
aten.log_softmax.int : {c32, c64},
aten.softmax.int : {c32, c64},
aten.softmax.int : {c32, c64},
# ROCm stuff; technically this should be expected failure but it's
# not worth it; these should get unified anyway
aten.miopen_batch_norm.default: {f32},
}
from torch.testing._internal.common_methods_invocations import sample_inputs_layer_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_group_norm
from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm
from torch.ao.quantization import FusedMovingAvgObsFakeQuantize
import io
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_mkldnn.py
|
test_conv_transpose_nhwc_lower_precision
|
def test_conv_transpose_nhwc_lower_precision(self, dtype):
# when torch.ops.mkldnn._is_mkldnn_bf16_supported() or torch.ops.mkldnn._is_mkldnn_fp16_supported()
# returns false, bf16/fp16 CPU conv will fall back to thnn impl
support_checks = {
torch.bfloat16: torch.ops.mkldnn._is_mkldnn_bf16_supported,
torch.float16: torch.ops.mkldnn._is_mkldnn_fp16_supported
}
if support_checks[dtype]():
self._test_conv_deconv_nhwc_base(torch.nn.ConvTranspose2d, torch.contiguous_format, dtype=dtype)
self._test_conv_deconv_nhwc_base(torch.nn.ConvTranspose2d, torch.channels_last, dtype=dtype)
self._test_conv_deconv_nhwc_base(torch.nn.ConvTranspose3d, torch.contiguous_format, dtype=dtype)
self._test_conv_deconv_nhwc_base(torch.nn.ConvTranspose3d, torch.channels_last_3d, dtype=dtype)
# BF16/FP16 fallback implementations are divided into two parts col2im+gemm,
# and the number of data type conversions in the middle is more than that of onednn's direct conv,
# resulting in additional accuracy loss.
precisions = {
torch.bfloat16: 2e-2,
torch.float16: 3e-3,
}
prec = precisions[dtype]
with torch.backends.mkldnn.flags(enabled=False):
self._test_conv_deconv_nhwc_base(torch.nn.ConvTranspose2d, torch.contiguous_format, dtype=dtype, prec=prec)
self._test_conv_deconv_nhwc_base(torch.nn.ConvTranspose2d, torch.channels_last, dtype=dtype, prec=prec)
self._test_conv_deconv_nhwc_base(torch.nn.ConvTranspose3d, torch.contiguous_format, dtype=dtype, prec=prec)
self._test_conv_deconv_nhwc_base(torch.nn.ConvTranspose3d, torch.channels_last_3d, dtype=dtype, prec=prec)
|
import copy
import itertools
import functools
import unittest
from contextlib import nullcontext
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
import torch
import torch.nn.functional as F
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS, \
skipIfTorchDynamo, xfailIfTorchDynamo
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
dtypes,
)
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
gradgradcheck = functools.partial(gradgradcheck, check_batched_grad=False)
types = [torch.float, torch.bfloat16, torch.half]
@unittest.skipIf(not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled")
class TestMkldnn(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_mkldnn.py
|
_test_relu_bf16_base
|
def _test_relu_bf16_base(self, name):
x = torch.randn((4, 5), dtype=torch.float32) * 10
x_bf16 = x.bfloat16()
fn = getattr(torch, name)
if has_bf16_support():
y = fn(x.to_mkldnn()).to_dense()
y_bf16 = fn(x_bf16.to_mkldnn()).to_dense(torch.float32)
self.assertEqual(y, y_bf16, atol=1e-1, rtol=1e-3)
else:
msg = r"bf16 path needs the cpu support avx512bw, avx512vl and avx512dq"
self.assertRaisesRegex(RuntimeError,
msg,
lambda: fn(x_bf16.to_mkldnn()))
|
def _test_relu_bf16_base(self, name):
x = torch.randn((4, 5), dtype=torch.float32) * 10
x_bf16 = x.bfloat16()
fn = getattr(torch, name)
if torch.ops.mkldnn._is_mkldnn_bf16_supported():
y = fn(x.to_mkldnn()).to_dense()
y_bf16 = fn(x_bf16.to_mkldnn()).to_dense(torch.float32)
self.assertEqual(y, y_bf16, atol=1e-1, rtol=1e-3)
else:
msg = r"bf16 path needs the cpu support avx512bw, avx512vl and avx512dq"
self.assertRaisesRegex(RuntimeError,
msg,
lambda: fn(x_bf16.to_mkldnn()))
|
import copy
import itertools
import functools
import unittest
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
import torch
import torch.nn.functional as F
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS, \
skipIfTorchDynamo
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
gradgradcheck = functools.partial(gradgradcheck, check_batched_grad=False)
import sys
types = [torch.float, torch.bfloat16]
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
class TestMkldnn(TestCase):
|
import copy
import itertools
import functools
import unittest
from contextlib import nullcontext
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
import torch
import torch.nn.functional as F
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS, \
skipIfTorchDynamo, xfailIfTorchDynamo
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
dtypes,
)
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
gradgradcheck = functools.partial(gradgradcheck, check_batched_grad=False)
types = [torch.float, torch.bfloat16, torch.half]
@unittest.skipIf(not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled")
class TestMkldnn(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_mkldnn.py
|
test_gelu_bf16
|
def test_gelu_bf16(self):
m = torch.nn.GELU()
x = torch.randn((4, 5), dtype=torch.float32) * 10
x1 = x.clone().to_mkldnn().requires_grad_()
x2 = x.clone().to_mkldnn(torch.bfloat16).requires_grad_()
if has_bf16_support():
y1 = m(x1).to_dense()
y2 = m(x2).to_dense()
loss1 = y1.sum()
loss2 = y2.sum()
loss1.backward()
loss2.backward()
self.assertEqual(y1, y2.to(torch.float32), atol=1e-1, rtol=0)
self.assertEqual(x1.grad.to_dense(), x2.grad.to_dense(torch.float32), atol=1e-2, rtol=0)
else:
msg = r"bf16 path needs the cpu support avx512bw, avx512vl and avx512dq"
self.assertRaisesRegex(RuntimeError,
msg,
lambda: m(x2))
|
def test_gelu_bf16(self):
m = torch.nn.GELU()
x = torch.randn((4, 5), dtype=torch.float32) * 10
x1 = x.clone().to_mkldnn().requires_grad_()
x2 = x.clone().to_mkldnn(torch.bfloat16).requires_grad_()
if torch.ops.mkldnn._is_mkldnn_bf16_supported():
y1 = m(x1).to_dense()
y2 = m(x2).to_dense()
loss1 = y1.sum()
loss2 = y2.sum()
loss1.backward()
loss2.backward()
self.assertEqual(y1, y2.to(torch.float32), atol=1e-1, rtol=0)
self.assertEqual(x1.grad.to_dense(), x2.grad.to_dense(torch.float32), atol=1e-2, rtol=0)
else:
msg = r"bf16 path needs the cpu support avx512bw, avx512vl and avx512dq"
self.assertRaisesRegex(RuntimeError,
msg,
lambda: m(x2))
|
import copy
import itertools
import functools
import unittest
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
import torch
import torch.nn.functional as F
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS, \
skipIfTorchDynamo
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
gradgradcheck = functools.partial(gradgradcheck, check_batched_grad=False)
import sys
types = [torch.float, torch.bfloat16]
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
class TestMkldnn(TestCase):
|
import copy
import itertools
import functools
import unittest
from contextlib import nullcontext
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
import torch
import torch.nn.functional as F
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS, \
skipIfTorchDynamo, xfailIfTorchDynamo
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
dtypes,
)
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
gradgradcheck = functools.partial(gradgradcheck, check_batched_grad=False)
types = [torch.float, torch.bfloat16, torch.half]
@unittest.skipIf(not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled")
class TestMkldnn(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_mkldnn.py
|
_test_prelu_bf16_base
|
def _test_prelu_bf16_base(self, size, num_channels):
if has_bf16_support():
x = torch.randn(size, dtype=torch.float32)
x_fp32 = x.clone().to_mkldnn().requires_grad_()
x_bf16 = x.clone().to_mkldnn(torch.bfloat16).requires_grad_()
m = mkldnn_utils.to_mkldnn(torch.nn.PReLU())
m_bf16 = mkldnn_utils.to_mkldnn(torch.nn.PReLU(), torch.bfloat16)
y = m(x_fp32).to_dense()
y_bf16 = m_bf16(x_bf16).to_dense()
self.assertEqual(y, y_bf16.to(torch.float32), atol=1e-1, rtol=1e-3)
loss = y.sum()
loss.backward()
loss_bf16 = y_bf16.sum()
loss_bf16.backward()
self.assertEqual(x_fp32.grad.to_dense(), x_bf16.grad.to_dense(torch.float32))
else:
x_bf16 = torch.randn(size, dtype=torch.bfloat16).requires_grad_()
m_bf16 = mkldnn_utils.to_mkldnn(torch.nn.PReLU(), torch.bfloat16)
msg = r"bf16 path needs the cpu support avx512bw, avx512vl and avx512dq"
self.assertRaisesRegex(RuntimeError,
msg,
lambda: m_bf16(x_bf16))
|
def _test_prelu_bf16_base(self, size, num_channels):
if torch.ops.mkldnn._is_mkldnn_bf16_supported():
x = torch.randn(size, dtype=torch.float32)
x_fp32 = x.clone().to_mkldnn().requires_grad_()
x_bf16 = x.clone().to_mkldnn(torch.bfloat16).requires_grad_()
m = mkldnn_utils.to_mkldnn(torch.nn.PReLU())
m_bf16 = mkldnn_utils.to_mkldnn(torch.nn.PReLU(), torch.bfloat16)
y = m(x_fp32).to_dense()
y_bf16 = m_bf16(x_bf16).to_dense()
self.assertEqual(y, y_bf16.to(torch.float32), atol=1e-1, rtol=1e-3)
loss = y.sum()
loss.backward()
loss_bf16 = y_bf16.sum()
loss_bf16.backward()
self.assertEqual(x_fp32.grad.to_dense(), x_bf16.grad.to_dense(torch.float32))
else:
x_bf16 = torch.randn(size, dtype=torch.bfloat16).requires_grad_()
m_bf16 = mkldnn_utils.to_mkldnn(torch.nn.PReLU(), torch.bfloat16)
msg = r"bf16 path needs the cpu support avx512bw, avx512vl and avx512dq"
self.assertRaisesRegex(RuntimeError,
msg,
lambda: m_bf16(x_bf16))
|
import copy
import itertools
import functools
import unittest
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
import torch
import torch.nn.functional as F
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS, \
skipIfTorchDynamo
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
gradgradcheck = functools.partial(gradgradcheck, check_batched_grad=False)
import sys
types = [torch.float, torch.bfloat16]
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
class TestMkldnn(TestCase):
|
import copy
import itertools
import functools
import unittest
from contextlib import nullcontext
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
import torch
import torch.nn.functional as F
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS, \
skipIfTorchDynamo, xfailIfTorchDynamo
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
dtypes,
)
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
gradgradcheck = functools.partial(gradgradcheck, check_batched_grad=False)
types = [torch.float, torch.bfloat16, torch.half]
@unittest.skipIf(not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled")
class TestMkldnn(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_mkldnn.py
|
_test_conv_bf16_base
|
def _test_conv_bf16_base(self, dim):
conv_module = {1: torch.nn.Conv1d, 2: torch.nn.Conv2d, 3: torch.nn.Conv3d}
input_shapes = {1: (224,), 2: (224, 224), 3: (55, 55, 55)}
options = itertools.product([True, False], [1, 2], [1, 4])
for bias, dilation, groups in options:
N = torch.randint(3, 10, (1,)).item()
M = torch.randint(1, 3, (1,)).item() * groups
C = torch.randint(1, 3, (1,)).item() * groups
x_shape = (N, C) + input_shapes[dim]
x = torch.randn(x_shape, dtype=torch.float32)
conv = conv_module[dim](in_channels=C,
out_channels=M,
kernel_size=3,
stride=2,
padding=1,
dilation=dilation,
bias=bias,
groups=groups).float()
x_bf16 = x.bfloat16()
if has_bf16_support():
mkldnn_conv = mkldnn_utils.to_mkldnn(copy.deepcopy(conv))
mkldnn_conv_bf16 = mkldnn_utils.to_mkldnn(copy.deepcopy(conv), torch.bfloat16)
y = mkldnn_conv(x.to_mkldnn()).to_dense()
y_bf16 = mkldnn_conv_bf16(x_bf16.to_mkldnn()).to_dense(torch.float32)
self.assertEqual(y, y_bf16, atol=1e-1, rtol=1e-3)
else:
msg = r"bf16 path needs the cpu support avx512bw, avx512vl and avx512dq"
with self.assertRaisesRegex(RuntimeError, msg):
mkldnn_conv_bf16 = mkldnn_utils.to_mkldnn(copy.deepcopy(conv), torch.bfloat16)
y_bf16 = mkldnn_conv_bf16(x_bf16.to_mkldnn()).to_dense(torch.float32)
|
import copy
import itertools
import functools
import unittest
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
import torch
import torch.nn.functional as F
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS, \
skipIfTorchDynamo
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
gradgradcheck = functools.partial(gradgradcheck, check_batched_grad=False)
import sys
types = [torch.float, torch.bfloat16]
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
class TestMkldnn(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_mkldnn.py
|
test_conv1d_bf16
|
def test_conv1d_bf16(self):
self._test_conv_bf16_base(dim=1)
|
import copy
import itertools
import functools
import unittest
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
import torch
import torch.nn.functional as F
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS, \
skipIfTorchDynamo
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
gradgradcheck = functools.partial(gradgradcheck, check_batched_grad=False)
import sys
types = [torch.float, torch.bfloat16]
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
class TestMkldnn(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_mkldnn.py
|
test_conv2d_bf16
|
def test_conv2d_bf16(self):
self._test_conv_bf16_base(dim=2)
|
import copy
import itertools
import functools
import unittest
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
import torch
import torch.nn.functional as F
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS, \
skipIfTorchDynamo
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
gradgradcheck = functools.partial(gradgradcheck, check_batched_grad=False)
import sys
types = [torch.float, torch.bfloat16]
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
class TestMkldnn(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_mkldnn.py
|
test_conv3d_bf16
|
def test_conv3d_bf16(self):
self._test_conv_bf16_base(dim=3)
|
import copy
import itertools
import functools
import unittest
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
import torch
import torch.nn.functional as F
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS, \
skipIfTorchDynamo
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
gradgradcheck = functools.partial(gradgradcheck, check_batched_grad=False)
import sys
types = [torch.float, torch.bfloat16]
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
class TestMkldnn(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_mkldnn.py
|
test_conv_deconv_3d_lower_precision
|
def test_conv_deconv_3d_lower_precision(self, dtype):
self._test_conv_deconv_lower_precision_base(3, torch.nn.Conv3d, dtype=dtype)
self._test_conv_deconv_lower_precision_base(3, torch.nn.ConvTranspose3d, dtype=dtype)
|
import copy
import itertools
import functools
import unittest
from contextlib import nullcontext
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
import torch
import torch.nn.functional as F
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS, \
skipIfTorchDynamo, xfailIfTorchDynamo
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
dtypes,
)
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
gradgradcheck = functools.partial(gradgradcheck, check_batched_grad=False)
types = [torch.float, torch.bfloat16, torch.half]
@unittest.skipIf(not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled")
class TestMkldnn(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_mkldnn.py
|
_test_conv_deconv_nhwc_base
|
options = itertools.product([True, False], [True, False], [1, 2], [1, 4])
for train, bias, dilation, groups in options:
N = torch.randint(3, 10, (1,)).item()
M = torch.randint(1, 3, (1,)).item() * groups
C = torch.randint(1, 3, (1,)).item() * groups
x_shape = (N, C) + input_shapes
x = torch.randn(x_shape, dtype=dtype)
# TODO: remove this when group depthwise is supported:
if conv_module is torch.nn.ConvTranspose2d and groups > 1 and C == groups:
continue
# conv1: mkldnn conv in contiguous memory format (nchw)
# conv2: mkldnn conv in channels last memory format (nhwc)
conv1 = conv_module(in_channels=C,
out_channels=M,
kernel_size=3,
stride=2,
padding=1,
dilation=dilation,
bias=bias,
groups=groups).to(dtype=dtype)
conv2 = copy.deepcopy(conv1).to(memory_format=weight_memory_format)
x1 = x.clone()
x2 = x.clone().to(memory_format=torch.channels_last)
if train:
x1.requires_grad_()
x2.requires_grad_()
y1 = conv1(x1)
y2 = conv2(x2)
self.assertEqual(y1, y2)
if train:
y1.sum().backward()
y2.sum().backward()
self.assertTrue(x2.grad.is_contiguous(memory_format=torch.channels_last))
self.assertEqual(conv1.weight.grad,
conv2.weight.grad,
atol=1e-3,
rtol=1e-3)
if bias:
self.assertEqual(conv1.bias.grad, conv2.bias.grad)
self.assertEqual(x1.grad, x2.grad)
|
def _test_conv_deconv_nhwc_base(self, conv_module, weight_memory_format, dtype, prec=None):
input_shapes = {2: (55, 55), 3: (14, 14, 14)}
options = itertools.product([True, False], [True, False], [1, 2], [1, 4])
if conv_module in [torch.nn.Conv2d, torch.nn.ConvTranspose2d]:
cl_format = torch.channels_last
input_shape = input_shapes[2]
elif conv_module in [torch.nn.Conv3d, torch.nn.ConvTranspose3d]:
cl_format = torch.channels_last_3d
input_shape = input_shapes[3]
for train, bias, dilation, groups in options:
N = torch.randint(3, 10, (1,)).item()
M = torch.randint(1, 3, (1,)).item() * groups
C = torch.randint(1, 3, (1,)).item() * groups
x_shape = (N, C) + input_shape
x = torch.randn(x_shape, dtype=dtype)
# conv1: mkldnn conv/deconv in contiguous memory format (nchw)
# conv2: mkldnn conv/deconv in channels last memory format (nhwc)
conv1 = conv_module(in_channels=C,
out_channels=M,
kernel_size=3,
stride=2,
padding=1,
dilation=dilation,
bias=bias,
groups=groups).to(dtype=dtype)
conv2 = copy.deepcopy(conv1).to(memory_format=weight_memory_format)
x1 = x.clone()
x2 = x.clone().to(memory_format=cl_format)
if train:
x1.requires_grad_()
x2.requires_grad_()
y1 = conv1(x1)
y2 = conv2(x2)
self.assertEqual(y1, y2, atol=prec, rtol=prec)
if train:
y1.sum().backward()
y2.sum().backward()
self.assertTrue(x2.grad.is_contiguous(memory_format=cl_format))
self.assertEqual(conv1.weight.grad,
conv2.weight.grad,
atol=1e-3,
rtol=1e-3)
if bias:
self.assertEqual(conv1.bias.grad, conv2.bias.grad, atol=prec, rtol=prec)
self.assertEqual(x1.grad, x2.grad, atol=prec, rtol=prec)
|
import copy
import itertools
import functools
import unittest
from contextlib import nullcontext
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
import torch
import torch.nn.functional as F
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS, \
skipIfTorchDynamo, xfailIfTorchDynamo
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
dtypes,
)
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
gradgradcheck = functools.partial(gradgradcheck, check_batched_grad=False)
types = [torch.float, torch.bfloat16, torch.half]
@unittest.skipIf(not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled")
class TestMkldnn(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/test_mkldnn.py
|
test_conv2d_nhwc_bf16
|
def test_conv2d_nhwc_bf16(self):
# when has_bf16_support() returns false, bf16 CPU conv will fall back to thnn impl
if has_bf16_support():
self._test_conv2d_nhwc_base(torch.nn.Conv2d, torch.contiguous_format, dtype=torch.bfloat16)
self._test_conv2d_nhwc_base(torch.nn.Conv2d, torch.channels_last, dtype=torch.bfloat16)
|
import copy
import itertools
import functools
import unittest
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
import torch
import torch.nn.functional as F
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS, \
skipIfTorchDynamo
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
gradgradcheck = functools.partial(gradgradcheck, check_batched_grad=False)
import sys
types = [torch.float, torch.bfloat16]
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
class TestMkldnn(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_mkldnn.py
|
test_conv_transpose2d_nhwc_bf16
|
def test_conv_transpose2d_nhwc_bf16(self):
# when has_bf16_support() returns false, bf16 CPU conv will fall back to thnn impl
if has_bf16_support():
self._test_conv2d_nhwc_base(torch.nn.ConvTranspose2d, torch.contiguous_format, dtype=torch.bfloat16)
self._test_conv2d_nhwc_base(torch.nn.ConvTranspose2d, torch.channels_last, dtype=torch.bfloat16)
|
self.assertEqual(conv1.bias.grad, conv2.bias.grad, atol=prec, rtol=prec)
self.assertEqual(x1.grad, x2.grad, atol=prec, rtol=prec)
|
import copy
import itertools
import functools
import unittest
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
import torch
import torch.nn.functional as F
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS, \
skipIfTorchDynamo
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
gradgradcheck = functools.partial(gradgradcheck, check_batched_grad=False)
import sys
types = [torch.float, torch.bfloat16]
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
class TestMkldnn(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
|
torch
|
test/test_mkldnn.py
|
test_conv_nhwc_lower_precision
|
def test_conv_nhwc_lower_precision(self, dtype):
# when torch.ops.mkldnn._is_mkldnn_bf16_supported() or torch.ops.mkldnn._is_mkldnn_fp16_supported()
# returns false, bf16/fp16 CPU conv will fall back to thnn impl
support_checks = {
torch.bfloat16: torch.ops.mkldnn._is_mkldnn_bf16_supported,
torch.float16: torch.ops.mkldnn._is_mkldnn_fp16_supported
}
if support_checks[dtype]():
self._test_conv_deconv_nhwc_base(torch.nn.Conv2d, torch.contiguous_format, dtype=dtype)
self._test_conv_deconv_nhwc_base(torch.nn.Conv2d, torch.channels_last, dtype=dtype)
self._test_conv_deconv_nhwc_base(torch.nn.Conv3d, torch.contiguous_format, dtype=dtype)
self._test_conv_deconv_nhwc_base(torch.nn.Conv3d, torch.channels_last_3d, dtype=dtype)
# BF16/FP16 fallback implementations are divided into two parts im2col+gemm,
# and the number of data type conversions in the middle is more than that of onednn's direct conv,
# resulting in additional accuracy loss.
precisions = {
torch.bfloat16: 1e-2,
torch.float16: 2e-3,
}
prec = precisions[dtype]
with torch.backends.mkldnn.flags(enabled=False):
self._test_conv_deconv_nhwc_base(torch.nn.Conv2d, torch.contiguous_format, dtype=dtype, prec=prec)
self._test_conv_deconv_nhwc_base(torch.nn.Conv2d, torch.channels_last, dtype=dtype, prec=prec)
self._test_conv_deconv_nhwc_base(torch.nn.Conv3d, torch.contiguous_format, dtype=dtype, prec=prec)
self._test_conv_deconv_nhwc_base(torch.nn.Conv3d, torch.channels_last_3d, dtype=dtype, prec=prec)
|
import copy
import itertools
import functools
import unittest
from contextlib import nullcontext
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
import torch
import torch.nn.functional as F
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS, \
skipIfTorchDynamo, xfailIfTorchDynamo
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
dtypes,
)
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
gradgradcheck = functools.partial(gradgradcheck, check_batched_grad=False)
types = [torch.float, torch.bfloat16, torch.half]
@unittest.skipIf(not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled")
class TestMkldnn(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_mkldnn.py
|
test_adaptive_avg_pool2d_bf16
|
def test_adaptive_avg_pool2d_bf16(self):
N = torch.randint(3, 10, (1,)).item()
C = torch.randint(3, 10, (1,)).item()
x = torch.randn(N, C, 224, 224, dtype=torch.float32) * 100
x_bf16 = x.bfloat16()
adaptive_avg_pool2d = torch.nn.AdaptiveAvgPool2d(7)
if has_bf16_support():
y = adaptive_avg_pool2d(x.to_mkldnn()).to_dense()
y_bf16 = adaptive_avg_pool2d(x.to_mkldnn()).to_dense(torch.float32)
self.assertEqual(y, y_bf16, atol=1e-1, rtol=1e-3)
else:
msg = "mkldnn_adaptive_avg_pool2d: bf16 path needs the cpu support avx512bw, avx512vl and avx512dq"
self.assertRaisesRegex(RuntimeError,
msg,
lambda: adaptive_avg_pool2d(x_bf16.to_mkldnn()))
|
def test_adaptive_avg_pool2d_bf16(self):
N = torch.randint(3, 10, (1,)).item()
C = torch.randint(3, 10, (1,)).item()
x = torch.randn(N, C, 224, 224, dtype=torch.float32) * 100
x_bf16 = x.bfloat16()
adaptive_avg_pool2d = torch.nn.AdaptiveAvgPool2d(7)
if torch.ops.mkldnn._is_mkldnn_bf16_supported():
y = adaptive_avg_pool2d(x.to_mkldnn()).to_dense()
y_bf16 = adaptive_avg_pool2d(x.to_mkldnn()).to_dense(torch.float32)
self.assertEqual(y, y_bf16, atol=1e-1, rtol=1e-3)
else:
msg = "mkldnn_adaptive_avg_pool2d: bf16 path needs the cpu support avx512bw, avx512vl and avx512dq"
self.assertRaisesRegex(RuntimeError,
msg,
lambda: adaptive_avg_pool2d(x_bf16.to_mkldnn()))
|
import copy
import itertools
import functools
import unittest
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
import torch
import torch.nn.functional as F
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS, \
skipIfTorchDynamo
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
gradgradcheck = functools.partial(gradgradcheck, check_batched_grad=False)
import sys
types = [torch.float, torch.bfloat16]
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
class TestMkldnn(TestCase):
|
import copy
import itertools
import functools
import unittest
from contextlib import nullcontext
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
import torch
import torch.nn.functional as F
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS, \
skipIfTorchDynamo, xfailIfTorchDynamo
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
dtypes,
)
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
gradgradcheck = functools.partial(gradgradcheck, check_batched_grad=False)
types = [torch.float, torch.bfloat16, torch.half]
@unittest.skipIf(not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled")
class TestMkldnn(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_mkldnn.py
|
_test_batch_norm_bf16_base
|
def _test_batch_norm_bf16_base(self, dim, channels, input):
bn_module = {2 : torch.nn.BatchNorm2d, 3 : torch.nn.BatchNorm3d}
x_bf16 = input.bfloat16()
# TODO: support training
for train in [False]:
bn = bn_module[dim](channels).float().train(train)
mkldnn_bn = mkldnn_utils.to_mkldnn(copy.deepcopy(bn))
if has_bf16_support():
y = bn(input.to_mkldnn().to_dense())
y_bf16 = bn(input.to_mkldnn().to_dense(torch.float))
self.assertEqual(y, y_bf16, atol=1e-1, rtol=1e-3)
else:
msg = "mkldnn_batch_norm: bf16 path needs the cpu support avx512bw, avx512vl and avx512dq"
self.assertRaisesRegex(RuntimeError,
msg,
lambda: bn(x_bf16.to_mkldnn()))
|
def _test_batch_norm_bf16_base(self, dim, channels, input):
bn_module = {2 : torch.nn.BatchNorm2d, 3 : torch.nn.BatchNorm3d}
x_bf16 = input.bfloat16()
# TODO: support training
for train in [False]:
bn = bn_module[dim](channels).float().train(train)
mkldnn_bn = mkldnn_utils.to_mkldnn(copy.deepcopy(bn))
if torch.ops.mkldnn._is_mkldnn_bf16_supported():
y = bn(input.to_mkldnn().to_dense())
y_bf16 = bn(input.to_mkldnn().to_dense(torch.float))
self.assertEqual(y, y_bf16, atol=1e-1, rtol=1e-3)
else:
msg = "mkldnn_batch_norm: bf16 path needs the cpu support avx512bw, avx512vl and avx512dq"
self.assertRaisesRegex(RuntimeError,
msg,
lambda: bn(x_bf16.to_mkldnn()))
|
import copy
import itertools
import functools
import unittest
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
import torch
import torch.nn.functional as F
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS, \
skipIfTorchDynamo
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
gradgradcheck = functools.partial(gradgradcheck, check_batched_grad=False)
import sys
types = [torch.float, torch.bfloat16]
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
class TestMkldnn(TestCase):
|
import copy
import itertools
import functools
import unittest
from contextlib import nullcontext
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
import torch
import torch.nn.functional as F
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS, \
skipIfTorchDynamo, xfailIfTorchDynamo
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
dtypes,
)
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
gradgradcheck = functools.partial(gradgradcheck, check_batched_grad=False)
types = [torch.float, torch.bfloat16, torch.half]
@unittest.skipIf(not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled")
class TestMkldnn(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_mkldnn_fusion.py
|
test_linear_binary_fusion_ops
|
def test_linear_binary_fusion_ops(self):
class M(nn.Module):
def __init__(self, binary_fn, in_channels, out_channels, bias, **kwargs):
super().__init__()
self.linear = torch.nn.Linear(
in_channels, out_channels, bias=bias, **kwargs
)
self.binary = binary_fn
def forward(self, x, other):
x = self.linear(x)
x = self.binary(x, other)
return x
out_feature = 20
for pointwise_name, pointwise_fn in self._binary_list().items():
options = itertools.product([[2, 3, 10], [2, 10]], [True, False])
for input_shape, bias in options:
with torch.no_grad():
mod = M(pointwise_fn, input_shape[-1], out_feature, bias).eval()
v = torch.randn(input_shape)
other = torch.randn(input_shape[:-1] + [out_feature])
ref = mod(v, other)
attr = pointwise_name
fused = torch.ops.mkldnn._linear_pointwise(
v, other, mod.linear.weight, mod.linear.bias, attr
)
self.assertEqual(ref, fused)
|
def test_linear_binary_fusion_ops(self):
class M(nn.Module):
def __init__(self, binary_fn, in_channels, out_channels, bias, **kwargs):
super().__init__()
self.linear = torch.nn.Linear(
in_channels, out_channels, bias=bias, **kwargs
)
self.binary = binary_fn
def forward(self, x, other):
x = self.linear(x)
x = self.binary(x, other)
return x
out_feature = 20
for pointwise_name, pointwise_fn in self._binary_list().items():
# Tensor with size = [1, 10] and stride = [0, 1] is contiguous tensor
# but it's strides is not default contiguous strides.
options = itertools.product([[[2, 3, 10], None], [[2, 10], None], [[1, 10], [0, 1]]], [True, False])
for (input_shape, input_stride), bias in options:
with torch.no_grad():
mod = M(pointwise_fn, input_shape[-1], out_feature, bias).eval()
v = torch.randn(input_shape)
if input_stride is not None:
v = v.as_strided(input_shape, input_stride)
other = torch.randn(input_shape[:-1] + [out_feature])
ref = mod(v, other)
attr = pointwise_name
fused = torch.ops.mkldnn._linear_pointwise(
v, other, mod.linear.weight, mod.linear.bias, attr
)
self.assertEqual(ref, fused)
|
import itertools
import unittest
from typing import NamedTuple, List
import torch
from torch import nn
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.jit_utils import JitTestCase
from test_tensorexpr import warmup_and_run_forward
FUSION_GROUP = 'prim::TensorExprGroup'
CONV_MODULES = {2: torch.nn.Conv2d, 3: torch.nn.Conv3d}
CONV_TRANSPOSE_MODULES = {2: torch.nn.ConvTranspose2d}
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
class TestMkldnnFusion(JitTestCase):
|
import itertools
import unittest
from typing import NamedTuple, List
import torch
from torch import nn
from torch.testing._internal.common_utils import run_tests, skipIfTorchDynamo
from torch.testing._internal.jit_utils import JitTestCase
from test_tensorexpr import warmup_and_run_forward
FUSION_GROUP = 'prim::TensorExprGroup'
CONV_MODULES = {2: torch.nn.Conv2d, 3: torch.nn.Conv3d}
CONV_TRANSPOSE_MODULES = {2: torch.nn.ConvTranspose2d}
@skipIfTorchDynamo("too slow")
@unittest.skipIf(not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled")
class TestMkldnnFusion(JitTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_mkldnn_fusion.py
|
test_conv_transpose_unary_fusion_ops
|
def test_conv_transpose_unary_fusion_ops(self):
class M(nn.Module):
def __init__(self, unary_fn, dim, in_channels, out_channels, kernel_size, **kwargs):
super().__init__()
self.conv_transpose = CONV_TRANSPOSE_MODULES[dim](in_channels, out_channels, kernel_size, **kwargs)
self.unary = unary_fn
def forward(self, x):
x = self.conv_transpose(x)
x = self.unary(x)
return x
input_shapes = {2: (28, 28)}
kernel_size = 3
for pointwise_name, pointwise_info in self._unary_list().items():
for dim in [2]:
channels_last = torch.channels_last if dim == 2 else torch.channels_last_3d
options = itertools.product([True, False], [1, 2], [1, 4], [torch.contiguous_format, channels_last], [False, True])
for bias, dilation, groups, memory_format, prepack_weight in options:
oC = 32 * groups
iC = 3 * groups
x_shape = (1, iC) + input_shapes[dim]
x = torch.randn(x_shape, dtype=torch.float32).to(memory_format=memory_format)
mod = M(pointwise_info.pointwise_module, dim, iC, oC, kernel_size, dilation=dilation, groups=groups, bias=bias)
mod = mod.to(memory_format=memory_format).eval()
with torch.no_grad():
ref = mod(x)
attr = pointwise_info.attr
scalars = pointwise_info.scalars
algorithm = pointwise_info.algorithm
if prepack_weight:
packed_weight = torch.ops.mkldnn._reorder_convolution_transpose_weight(
mod.conv_transpose.weight.to_mkldnn(),
mod.conv_transpose.padding,
mod.conv_transpose.output_padding,
mod.conv_transpose.stride,
mod.conv_transpose.dilation,
mod.conv_transpose.groups,
x.size())
mod.conv_transpose.weight = torch.nn.Parameter(
packed_weight,
requires_grad=mod.conv_transpose.weight.requires_grad,
)
fused = torch.ops.mkldnn._convolution_transpose_pointwise(
x,
mod.conv_transpose.weight,
mod.conv_transpose.bias,
mod.conv_transpose.padding,
mod.conv_transpose.output_padding,
mod.conv_transpose.stride,
mod.conv_transpose.dilation,
mod.conv_transpose.groups,
attr,
scalars,
algorithm)
self.assertEqual(ref, fused)
|
def test_conv_transpose_unary_fusion_ops(self):
class M(nn.Module):
def __init__(self, unary_fn, dim, in_channels, out_channels, kernel_size, **kwargs):
super().__init__()
self.conv_transpose = CONV_TRANSPOSE_MODULES[dim](in_channels, out_channels, kernel_size, **kwargs)
self.unary = unary_fn
def forward(self, x):
x = self.conv_transpose(x)
x = self.unary(x)
return x
input_shapes = {2: (28, 28)}
kernel_size = 3
for pointwise_info in self._unary_list().values():
for dim in [2]:
channels_last = torch.channels_last if dim == 2 else torch.channels_last_3d
options = itertools.product([True, False], [1, 2], [1, 4], [torch.contiguous_format, channels_last], [False, True])
for bias, dilation, groups, memory_format, prepack_weight in options:
oC = 32 * groups
iC = 3 * groups
x_shape = (1, iC) + input_shapes[dim]
x = torch.randn(x_shape, dtype=torch.float32).to(memory_format=memory_format)
mod = M(pointwise_info.pointwise_module, dim, iC, oC, kernel_size, dilation=dilation, groups=groups, bias=bias)
mod = mod.to(memory_format=memory_format).eval()
with torch.no_grad():
ref = mod(x)
attr = pointwise_info.attr
scalars = pointwise_info.scalars
algorithm = pointwise_info.algorithm
if prepack_weight:
packed_weight = torch.ops.mkldnn._reorder_convolution_transpose_weight(
mod.conv_transpose.weight,
mod.conv_transpose.padding,
mod.conv_transpose.output_padding,
mod.conv_transpose.stride,
mod.conv_transpose.dilation,
mod.conv_transpose.groups,
x.size())
mod.conv_transpose.weight = torch.nn.Parameter(
packed_weight,
requires_grad=mod.conv_transpose.weight.requires_grad,
)
fused = torch.ops.mkldnn._convolution_transpose_pointwise(
x,
mod.conv_transpose.weight,
mod.conv_transpose.bias,
mod.conv_transpose.padding,
mod.conv_transpose.output_padding,
mod.conv_transpose.stride,
mod.conv_transpose.dilation,
mod.conv_transpose.groups,
attr,
scalars,
algorithm)
self.assertEqual(ref, fused)
|
import itertools
import unittest
from typing import NamedTuple, List
import torch
from torch import nn
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.jit_utils import JitTestCase
from test_tensorexpr import warmup_and_run_forward
FUSION_GROUP = 'prim::TensorExprGroup'
CONV_MODULES = {2: torch.nn.Conv2d, 3: torch.nn.Conv3d}
CONV_TRANSPOSE_MODULES = {2: torch.nn.ConvTranspose2d}
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
class TestMkldnnFusion(JitTestCase):
|
import itertools
import unittest
from typing import NamedTuple, List
import torch
from torch import nn
from torch.testing._internal.common_utils import run_tests, skipIfTorchDynamo
from torch.testing._internal.jit_utils import JitTestCase
from test_tensorexpr import warmup_and_run_forward
FUSION_GROUP = 'prim::TensorExprGroup'
CONV_MODULES = {2: torch.nn.Conv2d, 3: torch.nn.Conv3d}
CONV_TRANSPOSE_MODULES = {2: torch.nn.ConvTranspose2d}
@skipIfTorchDynamo("too slow")
@unittest.skipIf(not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled")
class TestMkldnnFusion(JitTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_mobile_optimizer.py
|
__init__
|
def __init__(self):
super().__init__()
self.conv_weight = torch.nn.Parameter(torch.rand(conv_weight_shape))
self.conv_bias = torch.nn.Parameter(torch.rand((conv_bias_shape)))
self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape))
self.linear_bias = torch.nn.Parameter(torch.rand((weight_output_dim)))
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
|
def __init__(self) -> None:
super().__init__()
self.conv_weight = torch.nn.Parameter(torch.rand(conv_weight_shape))
self.conv_bias = torch.nn.Parameter(torch.rand(conv_bias_shape))
self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape))
self.linear_bias = torch.nn.Parameter(torch.rand(weight_output_dim))
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
|
import unittest
import torch
import torch.nn as nn
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfNoXNNPACK
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
generate_mobile_module_lints,
optimize_for_mobile,
MobileOptimizerType)
from torch.nn import functional as F
from torch.testing._internal.common_quantized import override_quantized_engine
import torchvision
FileCheck = torch._C.FileCheck
class MyTestModule(torch.nn.Module):
|
import unittest
import torch
import torch.nn as nn
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfNoXNNPACK
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
generate_mobile_module_lints,
optimize_for_mobile,
MobileOptimizerType)
from torch.nn import functional as F
from torch.testing._internal.common_quantized import override_quantized_engine
import torchvision
FileCheck = torch._C.FileCheck
class MyTestModule(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_mkldnn.py
|
_test_max_pool_bf16_base
|
def _test_max_pool_bf16_base(self, dim, input):
pool_module = {2: torch.nn.MaxPool2d, 3: torch.nn.MaxPool3d}
x_bf16 = input.bfloat16()
for stride in [1, 2, 3]:
for ceil_mode in [False, True]:
max_pool = pool_module[dim](
kernel_size=3 if not ceil_mode else 7,
stride=stride,
padding=1,
ceil_mode=ceil_mode)
if has_bf16_support():
y = max_pool(input.to_mkldnn()).to_dense()
y_bf16 = max_pool(x_bf16.to_mkldnn()).to_dense(torch.float32)
self.assertEqual(y, y_bf16, atol=0.1, rtol=1e-3)
else:
msg = "mkldnn_max_pool%dd: bf16 path needs the cpu support avx512bw, avx512vl and avx512dq" % dim
self.assertRaisesRegex(RuntimeError,
msg,
lambda: max_pool(x_bf16.to_mkldnn()))
|
def _test_max_pool_bf16_base(self, dim, input):
pool_module = {2: torch.nn.MaxPool2d, 3: torch.nn.MaxPool3d}
x_bf16 = input.bfloat16()
for stride in [1, 2, 3]:
for ceil_mode in [False, True]:
max_pool = pool_module[dim](
kernel_size=3 if not ceil_mode else 7,
stride=stride,
padding=1,
ceil_mode=ceil_mode)
if torch.ops.mkldnn._is_mkldnn_bf16_supported():
y = max_pool(input.to_mkldnn()).to_dense()
y_bf16 = max_pool(x_bf16.to_mkldnn()).to_dense(torch.float32)
self.assertEqual(y, y_bf16, atol=0.1, rtol=1e-3)
else:
msg = "mkldnn_max_pool%dd: bf16 path needs the cpu support avx512bw, avx512vl and avx512dq" % dim
self.assertRaisesRegex(RuntimeError,
msg,
lambda: max_pool(x_bf16.to_mkldnn()))
|
import copy
import itertools
import functools
import unittest
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
import torch
import torch.nn.functional as F
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS, \
skipIfTorchDynamo
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
gradgradcheck = functools.partial(gradgradcheck, check_batched_grad=False)
import sys
types = [torch.float, torch.bfloat16]
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
class TestMkldnn(TestCase):
|
import copy
import itertools
import functools
import unittest
from contextlib import nullcontext
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
import torch
import torch.nn.functional as F
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS, \
skipIfTorchDynamo, xfailIfTorchDynamo
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
dtypes,
)
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
gradgradcheck = functools.partial(gradgradcheck, check_batched_grad=False)
types = [torch.float, torch.bfloat16, torch.half]
@unittest.skipIf(not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled")
class TestMkldnn(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_mkldnn.py
|
_test_avg_pool_bf16_base
|
def _test_avg_pool_bf16_base(self, dim, input):
avg_module = {2: torch.nn.AvgPool2d, 3: torch.nn.AvgPool3d}
x_bf16 = input.bfloat16()
for count_include_pad in [True, False]:
avg_pool = avg_module[dim](
kernel_size=3,
stride=2,
padding=1,
count_include_pad=count_include_pad)
if has_bf16_support():
y = avg_pool(input.to_mkldnn()).to_dense()
y_bf16 = avg_pool(x_bf16.to_mkldnn()).to_dense(torch.float)
self.assertEqual(y, y_bf16, atol=1e-1, rtol=1e-3)
else:
msg = "mkldnn_avg_pool%dd: bf16 path needs the cpu support avx512bw, avx512vl and avx512dq" % dim
self.assertRaisesRegex(RuntimeError,
msg,
lambda: avg_pool(x_bf16.to_mkldnn()))
|
def _test_avg_pool_bf16_base(self, dim, input):
avg_module = {2: torch.nn.AvgPool2d, 3: torch.nn.AvgPool3d}
x_bf16 = input.bfloat16()
for count_include_pad in [True, False]:
avg_pool = avg_module[dim](
kernel_size=3,
stride=2,
padding=1,
count_include_pad=count_include_pad)
if torch.ops.mkldnn._is_mkldnn_bf16_supported():
y = avg_pool(input.to_mkldnn()).to_dense()
y_bf16 = avg_pool(x_bf16.to_mkldnn()).to_dense(torch.float)
self.assertEqual(y, y_bf16, atol=1e-1, rtol=1e-3)
else:
msg = "mkldnn_avg_pool%dd: bf16 path needs the cpu support avx512bw, avx512vl and avx512dq" % dim
self.assertRaisesRegex(RuntimeError,
msg,
lambda: avg_pool(x_bf16.to_mkldnn()))
|
import copy
import itertools
import functools
import unittest
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
import torch
import torch.nn.functional as F
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS, \
skipIfTorchDynamo
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
gradgradcheck = functools.partial(gradgradcheck, check_batched_grad=False)
import sys
types = [torch.float, torch.bfloat16]
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
class TestMkldnn(TestCase):
|
import copy
import itertools
import functools
import unittest
from contextlib import nullcontext
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
import torch
import torch.nn.functional as F
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS, \
skipIfTorchDynamo, xfailIfTorchDynamo
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
dtypes,
)
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
gradgradcheck = functools.partial(gradgradcheck, check_batched_grad=False)
types = [torch.float, torch.bfloat16, torch.half]
@unittest.skipIf(not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled")
class TestMkldnn(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_mkldnn.py
|
test_resnext50_32x4d
|
def test_resnext50_32x4d(self):
model = torchvision.models.resnet.resnext50_32x4d(pretrained=False)
self._test_imagenet_model(model)
|
def test_resnext50_32x4d(self):
model = torchvision.models.resnet.resnext50_32x4d(weights=None)
self._test_imagenet_model(model)
|
import copy
import itertools
import functools
import unittest
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
import torch
import torch.nn.functional as F
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS, \
skipIfTorchDynamo
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
gradgradcheck = functools.partial(gradgradcheck, check_batched_grad=False)
import sys
types = [torch.float, torch.bfloat16]
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
class TestMkldnn(TestCase):
|
import copy
import itertools
import functools
import unittest
from contextlib import nullcontext
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
import torch
import torch.nn.functional as F
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS, \
skipIfTorchDynamo, xfailIfTorchDynamo
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
dtypes,
)
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
gradgradcheck = functools.partial(gradgradcheck, check_batched_grad=False)
types = [torch.float, torch.bfloat16, torch.half]
@unittest.skipIf(not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled")
class TestMkldnn(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_mkldnn.py
|
_lstm_params_list
|
def _lstm_params_list(self):
params_dict = {
"input_size": [1, 5],
"hidden_size": [5, 16],
"num_layers": [1, 3],
"bidirectional": [False, True],
"bias": [False, True],
"batch_first": [False, True],
"dropout": [0, 0.4, 0.7, 1],
"batch_size": [1, 2],
"seq_len": [1, 3],
"training": [False, True]
}
params_list = []
for _, value in params_dict.items():
params_list.append(value)
return params_list
|
def _lstm_params_list(self):
params_dict = {
"input_size": [1, 5],
"hidden_size": [5, 16],
"num_layers": [1, 3],
"bidirectional": [False, True],
"bias": [False, True],
"batch_first": [False, True],
"dropout": [0, 0.4, 0.7, 1],
"batch_size": [1, 2],
"seq_len": [1, 3],
"training": [False, True]
}
params_list = list(params_dict.values())
return params_list
|
import copy
import itertools
import functools
import unittest
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
import torch
import torch.nn.functional as F
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS, \
skipIfTorchDynamo
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
gradgradcheck = functools.partial(gradgradcheck, check_batched_grad=False)
import sys
types = [torch.float, torch.bfloat16]
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
class TestMkldnn(TestCase):
|
import copy
import itertools
import functools
import unittest
from contextlib import nullcontext
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
import torch
import torch.nn.functional as F
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS, \
skipIfTorchDynamo, xfailIfTorchDynamo
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
dtypes,
)
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
gradgradcheck = functools.partial(gradgradcheck, check_batched_grad=False)
types = [torch.float, torch.bfloat16, torch.half]
@unittest.skipIf(not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled")
class TestMkldnn(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_mkldnn.py
|
test_matmul_lower_precision
|
def test_matmul_lower_precision(self, dtype):
support_check = {
torch.bfloat16: torch.ops.mkldnn._is_mkldnn_bf16_supported,
torch.float16: torch.ops.mkldnn._is_mkldnn_fp16_supported,
}
def common(self, shape1, shape2, op, dtype):
a = torch.randn(shape1, dtype=dtype)
a_ref = a.float()
b = torch.randn(shape2, dtype=dtype)
b_ref = b.float()
y = op(a, b)
y_ref = op(a_ref, b_ref)
self.assertEqual(y, y_ref, exact_dtype=False)
if support_check[dtype]():
a1 = torch.randn([64, 1, 33], dtype=dtype)
# a2 is contiguous tensor but it's strides
# is not default contiguous strides.
a2 = torch.as_strided(a1.clone(), [64, 1, 33], [33, 3, 1])
self.assertTrue(a2.is_contiguous())
b = torch.randn(64, 33, 256).to(dtype=dtype)
y1 = torch.ops.aten.bmm(a1, b)
y2 = torch.bmm(a2, b)
self.assertEqual(y1, y2)
for shape1, shape2, op in [
((33, 77), (77, 22), torch.matmul),
((128, 256), (256, 10), torch.matmul),
((7, 300), (300, 3), torch.matmul),
((1, 100), (100, 60), torch.matmul),
((100, 1), (1, 100), torch.matmul),
((20, 54, 78), (20, 78, 10), torch.bmm),
((1, 300, 1), (1, 1, 300), torch.bmm),
]:
common(self, shape1, shape2, op, dtype)
|
import copy
import itertools
import functools
import unittest
from contextlib import nullcontext
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
import torch
import torch.nn.functional as F
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS, \
skipIfTorchDynamo, xfailIfTorchDynamo
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
dtypes,
)
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
gradgradcheck = functools.partial(gradgradcheck, check_batched_grad=False)
types = [torch.float, torch.bfloat16, torch.half]
@unittest.skipIf(not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled")
class TestMkldnn(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_mkldnn.py
|
common
|
if __name__ == '__main__':
run_tests()
|
def common(self, shape1, shape2, op, dtype):
a = torch.randn(shape1, dtype=dtype)
a_ref = a.float()
b = torch.randn(shape2, dtype=dtype)
b_ref = b.float()
y = op(a, b)
y_ref = op(a_ref, b_ref)
self.assertEqual(y, y_ref, exact_dtype=False)
if support_check[dtype]():
a1 = torch.randn([64, 1, 33], dtype=dtype)
# a2 is contiguous tensor but it's strides
# is not default contiguous strides.
a2 = torch.as_strided(a1.clone(), [64, 1, 33], [33, 3, 1])
self.assertTrue(a2.is_contiguous())
b = torch.randn(64, 33, 256).to(dtype=dtype)
y1 = torch.ops.aten.bmm(a1, b)
y2 = torch.bmm(a2, b)
self.assertEqual(y1, y2)
for shape1, shape2, op in [
((33, 77), (77, 22), torch.matmul),
((128, 256), (256, 10), torch.matmul),
((7, 300), (300, 3), torch.matmul),
((1, 100), (100, 60), torch.matmul),
((100, 1), (1, 100), torch.matmul),
((20, 54, 78), (20, 78, 10), torch.bmm),
((1, 300, 1), (1, 1, 300), torch.bmm),
]:
common(self, shape1, shape2, op, dtype)
|
import copy
import itertools
import functools
import unittest
from contextlib import nullcontext
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
import torch
import torch.nn.functional as F
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS, \
skipIfTorchDynamo, xfailIfTorchDynamo
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
dtypes,
)
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
gradgradcheck = functools.partial(gradgradcheck, check_batched_grad=False)
types = [torch.float, torch.bfloat16, torch.half]
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/test_mobile_optimizer.py
|
test_quantized_conv_no_asan_failures
|
def test_quantized_conv_no_asan_failures(self):
# There were ASAN failures when fold_conv_bn was run on
# already quantized conv modules. Verifying that this does
# not happen again.
if 'qnnpack' not in torch.backends.quantized.supported_engines:
return
class Child(nn.Module):
def __init__(self):
super().__init__()
self.conv2 = nn.Conv2d(1, 1, 1)
def forward(self, x):
x = self.conv2(x)
return x
class Parent(nn.Module):
def __init__(self):
super().__init__()
self.quant = torch.ao.quantization.QuantStub()
self.conv1 = nn.Conv2d(1, 1, 1)
self.child = Child()
self.dequant = torch.ao.quantization.DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.conv1(x)
x = self.child(x)
x = self.dequant(x)
return x
with override_quantized_engine('qnnpack'):
model = Parent()
model.qconfig = torch.ao.quantization.get_default_qconfig('qnnpack')
torch.ao.quantization.prepare(model, inplace=True)
model(torch.randn(4, 1, 4, 4))
torch.ao.quantization.convert(model, inplace=True)
model = torch.jit.script(model)
# this line should not have ASAN failures
model_optim = optimize_for_mobile(model)
|
def test_quantized_conv_no_asan_failures(self):
# There were ASAN failures when fold_conv_bn was run on
# already quantized conv modules. Verifying that this does
# not happen again.
if 'qnnpack' not in torch.backends.quantized.supported_engines:
return
class Child(nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv2 = nn.Conv2d(1, 1, 1)
def forward(self, x):
x = self.conv2(x)
return x
class Parent(nn.Module):
def __init__(self) -> None:
super().__init__()
self.quant = torch.ao.quantization.QuantStub()
self.conv1 = nn.Conv2d(1, 1, 1)
self.child = Child()
self.dequant = torch.ao.quantization.DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.conv1(x)
x = self.child(x)
x = self.dequant(x)
return x
with override_quantized_engine('qnnpack'):
model = Parent()
model.qconfig = torch.ao.quantization.get_default_qconfig('qnnpack')
torch.ao.quantization.prepare(model, inplace=True)
model(torch.randn(4, 1, 4, 4))
torch.ao.quantization.convert(model, inplace=True)
model = torch.jit.script(model)
# this line should not have ASAN failures
model_optim = optimize_for_mobile(model)
|
import unittest
import torch
import torch.nn as nn
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfNoXNNPACK
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
generate_mobile_module_lints,
optimize_for_mobile,
MobileOptimizerType)
from torch.nn import functional as F
from torch.testing._internal.common_quantized import override_quantized_engine
import torchvision
FileCheck = torch._C.FileCheck
class TestOptimizer(TestCase):
|
import unittest
import torch
import torch.nn as nn
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfNoXNNPACK
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
generate_mobile_module_lints,
optimize_for_mobile,
MobileOptimizerType)
from torch.nn import functional as F
from torch.testing._internal.common_quantized import override_quantized_engine
import torchvision
FileCheck = torch._C.FileCheck
class TestOptimizer(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_mobile_optimizer.py
|
__init__
|
def __init__(self):
super().__init__()
self.conv_weight = torch.nn.Parameter(torch.rand(conv_weight_shape))
self.conv_bias = torch.nn.Parameter(torch.rand((conv_bias_shape)))
self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape))
self.linear_bias = torch.nn.Parameter(torch.rand((weight_output_dim)))
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
|
def __init__(self) -> None:
super().__init__()
self.conv_weight = torch.nn.Parameter(torch.rand(conv_weight_shape))
self.conv_bias = torch.nn.Parameter(torch.rand(conv_bias_shape))
self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape))
self.linear_bias = torch.nn.Parameter(torch.rand(weight_output_dim))
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
|
import unittest
import torch
import torch.nn as nn
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfNoXNNPACK
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
generate_mobile_module_lints,
optimize_for_mobile,
MobileOptimizerType)
from torch.nn import functional as F
from torch.testing._internal.common_quantized import override_quantized_engine
import torchvision
FileCheck = torch._C.FileCheck
class MyTestModule(torch.nn.Module):
|
import unittest
import torch
import torch.nn as nn
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfNoXNNPACK
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
generate_mobile_module_lints,
optimize_for_mobile,
MobileOptimizerType)
from torch.nn import functional as F
from torch.testing._internal.common_quantized import override_quantized_engine
import torchvision
FileCheck = torch._C.FileCheck
class MyTestModule(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_mobile_optimizer.py
|
__init__
|
def __init__(self):
super().__init__()
self.conv_weight = torch.nn.Parameter(torch.rand(conv_weight_shape))
self.conv_bias = torch.nn.Parameter(torch.rand((conv_bias_shape)))
self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape))
self.linear_bias = torch.nn.Parameter(torch.rand((weight_output_dim)))
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
|
def __init__(self) -> None:
super().__init__()
self.conv_weight = torch.nn.Parameter(torch.rand(conv_weight_shape))
self.conv_bias = torch.nn.Parameter(torch.rand(conv_bias_shape))
self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape))
self.linear_bias = torch.nn.Parameter(torch.rand(weight_output_dim))
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
|
import unittest
import torch
import torch.nn as nn
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfNoXNNPACK
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
generate_mobile_module_lints,
optimize_for_mobile,
MobileOptimizerType)
from torch.nn import functional as F
from torch.testing._internal.common_quantized import override_quantized_engine
import torchvision
FileCheck = torch._C.FileCheck
class MyTestModule(torch.nn.Module):
|
import unittest
import torch
import torch.nn as nn
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfNoXNNPACK
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
generate_mobile_module_lints,
optimize_for_mobile,
MobileOptimizerType)
from torch.nn import functional as F
from torch.testing._internal.common_quantized import override_quantized_engine
import torchvision
FileCheck = torch._C.FileCheck
class MyTestModule(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_mobile_optimizer.py
|
__init__
|
def __init__(self):
super().__init__()
self.conv_weight = torch.nn.Parameter(torch.rand(conv_weight_shape))
self.conv_bias = torch.nn.Parameter(torch.rand((conv_bias_shape)))
self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape))
self.linear_bias = torch.nn.Parameter(torch.rand((weight_output_dim)))
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
|
def __init__(self) -> None:
super().__init__()
self.conv_weight = torch.nn.Parameter(torch.rand(conv_weight_shape))
self.conv_bias = torch.nn.Parameter(torch.rand(conv_bias_shape))
self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape))
self.linear_bias = torch.nn.Parameter(torch.rand(weight_output_dim))
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
|
import unittest
import torch
import torch.nn as nn
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfNoXNNPACK
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
generate_mobile_module_lints,
optimize_for_mobile,
MobileOptimizerType)
from torch.nn import functional as F
from torch.testing._internal.common_quantized import override_quantized_engine
import torchvision
FileCheck = torch._C.FileCheck
class MyTestModule(torch.nn.Module):
|
import unittest
import torch
import torch.nn as nn
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfNoXNNPACK
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
generate_mobile_module_lints,
optimize_for_mobile,
MobileOptimizerType)
from torch.nn import functional as F
from torch.testing._internal.common_quantized import override_quantized_engine
import torchvision
FileCheck = torch._C.FileCheck
class MyTestModule(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_mobile_optimizer.py
|
__init__
|
def __init__(self):
super().__init__()
self.conv_weight = torch.nn.Parameter(torch.rand(conv_weight_shape))
self.conv_bias = torch.nn.Parameter(torch.rand((conv_bias_shape)))
self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape))
self.linear_bias = torch.nn.Parameter(torch.rand((weight_output_dim)))
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
|
def __init__(self) -> None:
super().__init__()
self.conv_weight = torch.nn.Parameter(torch.rand(conv_weight_shape))
self.conv_bias = torch.nn.Parameter(torch.rand(conv_bias_shape))
self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape))
self.linear_bias = torch.nn.Parameter(torch.rand(weight_output_dim))
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
|
import unittest
import torch
import torch.nn as nn
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfNoXNNPACK
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
generate_mobile_module_lints,
optimize_for_mobile,
MobileOptimizerType)
from torch.nn import functional as F
from torch.testing._internal.common_quantized import override_quantized_engine
import torchvision
FileCheck = torch._C.FileCheck
class MyTestModule(torch.nn.Module):
|
import unittest
import torch
import torch.nn as nn
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfNoXNNPACK
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
generate_mobile_module_lints,
optimize_for_mobile,
MobileOptimizerType)
from torch.nn import functional as F
from torch.testing._internal.common_quantized import override_quantized_engine
import torchvision
FileCheck = torch._C.FileCheck
class MyTestModule(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_mkldnn_fusion.py
|
test_linear_unary_fusion_ops
|
def test_linear_unary_fusion_ops(self):
class M(nn.Module):
def __init__(self, unary_fn, in_channels, out_channels, bias, **kwargs):
super().__init__()
self.linear = torch.nn.Linear(
in_channels, out_channels, bias=bias, **kwargs
)
self.unary = unary_fn
def forward(self, x):
x = self.linear(x)
x = self.unary(x)
return x
for pointwise_name, pointwise_info in self._unary_list().items():
options = itertools.product([[2, 3, 10], [2, 10]], [True, False])
for input_shape, bias in options:
with torch.no_grad():
mod = M(pointwise_info.pointwise_module, input_shape[-1], 10, bias).eval()
v = torch.randn(input_shape)
ref = mod(v)
attr = pointwise_info.attr
scalars = pointwise_info.scalars
algorithm = pointwise_info.algorithm
fused = torch.ops.mkldnn._linear_pointwise(
v, mod.linear.weight, mod.linear.bias, attr, scalars, algorithm
)
self.assertEqual(ref, fused)
|
def test_linear_unary_fusion_ops(self):
class M(nn.Module):
def __init__(self, unary_fn, in_channels, out_channels, bias, **kwargs):
super().__init__()
self.linear = torch.nn.Linear(
in_channels, out_channels, bias=bias, **kwargs
)
self.unary = unary_fn
def forward(self, x):
x = self.linear(x)
x = self.unary(x)
return x
for pointwise_info in self._unary_list().values():
# Tensor with size = [1, 10] and stride = [0, 1] is contiguous tensor
# but it's strides is not default contiguous strides.
options = itertools.product([[[2, 3, 10], None], [[2, 10], None], [[1, 10], [0, 1]]], [True, False])
for (input_shape, input_stride), bias in options:
with torch.no_grad():
mod = M(pointwise_info.pointwise_module, input_shape[-1], 10, bias).eval()
v = torch.randn(input_shape)
if input_stride is not None:
v = v.as_strided(input_shape, input_stride)
ref = mod(v)
attr = pointwise_info.attr
scalars = pointwise_info.scalars
algorithm = pointwise_info.algorithm
fused = torch.ops.mkldnn._linear_pointwise(
v, mod.linear.weight, mod.linear.bias, attr, scalars, algorithm
)
self.assertEqual(ref, fused)
|
import itertools
import unittest
from typing import NamedTuple, List
import torch
from torch import nn
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.jit_utils import JitTestCase
from test_tensorexpr import warmup_and_run_forward
FUSION_GROUP = 'prim::TensorExprGroup'
CONV_MODULES = {2: torch.nn.Conv2d, 3: torch.nn.Conv3d}
CONV_TRANSPOSE_MODULES = {2: torch.nn.ConvTranspose2d}
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
class TestMkldnnFusion(JitTestCase):
|
import itertools
import unittest
from typing import NamedTuple, List
import torch
from torch import nn
from torch.testing._internal.common_utils import run_tests, skipIfTorchDynamo
from torch.testing._internal.jit_utils import JitTestCase
from test_tensorexpr import warmup_and_run_forward
FUSION_GROUP = 'prim::TensorExprGroup'
CONV_MODULES = {2: torch.nn.Conv2d, 3: torch.nn.Conv3d}
CONV_TRANSPOSE_MODULES = {2: torch.nn.ConvTranspose2d}
@skipIfTorchDynamo("too slow")
@unittest.skipIf(not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled")
class TestMkldnnFusion(JitTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_mkldnn_fusion.py
|
test_conv_unary_fusion_ops
|
def test_conv_unary_fusion_ops(self):
class M(nn.Module):
def __init__(self, unary_fn, dim, in_channels, out_channels, dilation, groups, bias, **kwargs):
super().__init__()
self.conv = CONV_MODULES[dim](in_channels, out_channels, dilation=dilation, groups=groups, bias=bias, **kwargs)
self.unary = unary_fn
def forward(self, x):
x = self.conv(x)
x = self.unary(x)
return x
input_shapes = {2: (112, 112), 3: (55, 55, 55)}
for pointwise_name, pointwise_info in self._unary_list().items():
for dim in [2, 3]:
channels_last = torch.channels_last if dim == 2 else torch.channels_last_3d
options = itertools.product([True, False], [1, 2], [1, 4], [torch.contiguous_format, channels_last])
for bias, dilation, groups, memory_format in options:
oC = 32 * groups
iC = 3 * groups
x_shape = (1, iC) + input_shapes[dim]
x = torch.randn(x_shape, dtype=torch.float32).to(memory_format=memory_format)
mod = M(pointwise_info.pointwise_module, dim, iC, oC, dilation, groups, bias, kernel_size=3)
mod = mod.to(memory_format=memory_format).eval()
with torch.no_grad():
ref = mod(x)
attr = pointwise_info.attr
scalars = pointwise_info.scalars
algorithm = pointwise_info.algorithm
fused = torch.ops.mkldnn._convolution_pointwise(
x, mod.conv.weight, mod.conv.bias, mod.conv.padding, mod.conv.stride, mod.conv.dilation,
mod.conv.groups, attr, scalars, algorithm
)
self.assertEqual(ref, fused)
|
def test_conv_unary_fusion_ops(self):
class M(nn.Module):
def __init__(self, unary_fn, dim, in_channels, out_channels, dilation, groups, bias, **kwargs):
super().__init__()
self.conv = CONV_MODULES[dim](in_channels, out_channels, dilation=dilation, groups=groups, bias=bias, **kwargs)
self.unary = unary_fn
def forward(self, x):
x = self.conv(x)
x = self.unary(x)
return x
input_shapes = {2: (112, 112), 3: (55, 55, 55)}
for pointwise_info in self._unary_list().values():
for dim in [2, 3]:
channels_last = torch.channels_last if dim == 2 else torch.channels_last_3d
options = itertools.product([True, False], [1, 2], [1, 4], [torch.contiguous_format, channels_last])
for bias, dilation, groups, memory_format in options:
oC = 32 * groups
iC = 3 * groups
x_shape = (1, iC) + input_shapes[dim]
x = torch.randn(x_shape, dtype=torch.float32).to(memory_format=memory_format)
mod = M(pointwise_info.pointwise_module, dim, iC, oC, dilation, groups, bias, kernel_size=3)
mod = mod.to(memory_format=memory_format).eval()
with torch.no_grad():
ref = mod(x)
attr = pointwise_info.attr
scalars = pointwise_info.scalars
algorithm = pointwise_info.algorithm
fused = torch.ops.mkldnn._convolution_pointwise(
x, mod.conv.weight, mod.conv.bias, mod.conv.padding, mod.conv.stride, mod.conv.dilation,
mod.conv.groups, attr, scalars, algorithm
)
self.assertEqual(ref, fused)
|
import itertools
import unittest
from typing import NamedTuple, List
import torch
from torch import nn
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.jit_utils import JitTestCase
from test_tensorexpr import warmup_and_run_forward
FUSION_GROUP = 'prim::TensorExprGroup'
CONV_MODULES = {2: torch.nn.Conv2d, 3: torch.nn.Conv3d}
CONV_TRANSPOSE_MODULES = {2: torch.nn.ConvTranspose2d}
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
class TestMkldnnFusion(JitTestCase):
|
import itertools
import unittest
from typing import NamedTuple, List
import torch
from torch import nn
from torch.testing._internal.common_utils import run_tests, skipIfTorchDynamo
from torch.testing._internal.jit_utils import JitTestCase
from test_tensorexpr import warmup_and_run_forward
FUSION_GROUP = 'prim::TensorExprGroup'
CONV_MODULES = {2: torch.nn.Conv2d, 3: torch.nn.Conv3d}
CONV_TRANSPOSE_MODULES = {2: torch.nn.ConvTranspose2d}
@skipIfTorchDynamo("too slow")
@unittest.skipIf(not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled")
class TestMkldnnFusion(JitTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_mkldnn_fusion.py
|
test_conv_binary_fusion_ops
|
def test_conv_binary_fusion_ops(self):
class M(nn.Module):
def __init__(self, binary_fn, dim, in_channels, out_channels, dilation, groups, bias, **kwargs):
super().__init__()
self.conv = CONV_MODULES[dim](in_channels, out_channels, dilation=dilation, groups=groups, bias=bias, **kwargs)
self.binary = binary_fn
def forward(self, x, other):
x = self.conv(x)
x = self.binary(x, other)
return x
input_shapes = {2: (112, 112), 3: (55, 55, 55)}
for pointwise_name, pointwise_fn in self._binary_list().items():
for dim in [2, 3]:
channels_last = torch.channels_last if dim == 2 else torch.channels_last_3d
options = itertools.product([False, True], [True, False], [1, 2], [1, 4], [torch.contiguous_format, channels_last])
for fuse_relu, bias, dilation, groups, memory_format in options:
oC = 32 * groups
iC = 3 * groups
x_shape = (1, iC) + input_shapes[dim]
x = torch.randn(x_shape, dtype=torch.float32).to(memory_format=memory_format)
mod = M(pointwise_fn, dim, iC, oC, dilation, groups, bias, kernel_size=3)
mod = mod.to(memory_format=memory_format).eval()
other = torch.randn_like(mod.conv(x))
with torch.no_grad():
ref = mod(x, other)
unary_attr = None
if fuse_relu:
ref.relu_()
unary_attr = "relu"
attr = pointwise_name
fused = torch.ops.mkldnn._convolution_pointwise(
x, other, mod.conv.weight, mod.conv.bias, mod.conv.padding, mod.conv.stride, mod.conv.dilation,
mod.conv.groups, attr, None, unary_attr, [], None
)
# for binary add, we support inplace version.
if attr == "add":
fused_inplace = torch.ops.mkldnn._convolution_pointwise_(
x, other, mod.conv.weight, mod.conv.bias, mod.conv.padding, mod.conv.stride, mod.conv.dilation,
mod.conv.groups, attr, None, unary_attr, [], None
)
self.assertEqual(ref, other)
self.assertEqual(ref, fused_inplace)
self.assertEqual(ref, fused)
|
def test_conv_binary_fusion_ops(self):
class M(nn.Module):
def __init__(self, binary_fn, dim, in_channels, out_channels, dilation, groups, bias, **kwargs):
super().__init__()
self.conv = CONV_MODULES[dim](in_channels, out_channels, dilation=dilation, groups=groups, bias=bias, **kwargs)
self.binary = binary_fn
def forward(self, x, other):
x = self.conv(x)
x = self.binary(x, other)
return x
input_shapes = {2: (112, 112), 3: (22, 22, 22)}
for pointwise_name, pointwise_fn in self._binary_list().items():
for dim in [2, 3]:
channels_last = torch.channels_last if dim == 2 else torch.channels_last_3d
options = itertools.product([False, True], [True, False], [1, 2], [1, 4], [torch.contiguous_format, channels_last])
for fuse_relu, bias, dilation, groups, memory_format in options:
oC = 32 * groups
iC = 3 * groups
x_shape = (1, iC) + input_shapes[dim]
x = torch.randn(x_shape, dtype=torch.float32).to(memory_format=memory_format)
mod = M(pointwise_fn, dim, iC, oC, dilation, groups, bias, kernel_size=3)
mod = mod.to(memory_format=memory_format).eval()
other = torch.randn_like(mod.conv(x))
with torch.no_grad():
ref = mod(x, other)
unary_attr = None
if fuse_relu:
ref.relu_()
unary_attr = "relu"
attr = pointwise_name
fused = torch.ops.mkldnn._convolution_pointwise(
x, other, mod.conv.weight, mod.conv.bias, mod.conv.padding, mod.conv.stride, mod.conv.dilation,
mod.conv.groups, attr, None, unary_attr, [], None
)
# for binary add, we support inplace version.
if attr == "add":
fused_inplace = torch.ops.mkldnn._convolution_pointwise_(
other, x, mod.conv.weight, mod.conv.bias, mod.conv.padding, mod.conv.stride, mod.conv.dilation,
mod.conv.groups, attr, None, unary_attr, [], None
)
self.assertEqual(ref, other)
self.assertEqual(ref, fused_inplace)
self.assertEqual(ref, fused, atol=5e-4, rtol=5e-4)
|
import itertools
import unittest
from typing import NamedTuple, List
import torch
from torch import nn
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.jit_utils import JitTestCase
from test_tensorexpr import warmup_and_run_forward
FUSION_GROUP = 'prim::TensorExprGroup'
CONV_MODULES = {2: torch.nn.Conv2d, 3: torch.nn.Conv3d}
CONV_TRANSPOSE_MODULES = {2: torch.nn.ConvTranspose2d}
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
class TestMkldnnFusion(JitTestCase):
|
import itertools
import unittest
from typing import NamedTuple, List
import torch
from torch import nn
from torch.testing._internal.common_utils import run_tests, skipIfTorchDynamo
from torch.testing._internal.jit_utils import JitTestCase
from test_tensorexpr import warmup_and_run_forward
FUSION_GROUP = 'prim::TensorExprGroup'
CONV_MODULES = {2: torch.nn.Conv2d, 3: torch.nn.Conv3d}
CONV_TRANSPOSE_MODULES = {2: torch.nn.ConvTranspose2d}
@skipIfTorchDynamo("too slow")
@unittest.skipIf(not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled")
class TestMkldnnFusion(JitTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_mkldnn.py
|
test_linear_lowp
|
in_features = torch.randint(3, 10, (1,)).item()
out_features = torch.randint(3, 100, (1,)).item()
x = torch.randn(3, in_features, dtype=torch.float32) * 10
x_bf16 = x.bfloat16()
for bias in [True, False]:
linear = torch.nn.Linear(in_features, out_features, bias=bias).float()
mkldnn_linear = mkldnn_utils.to_mkldnn(copy.deepcopy(linear))
mkldnn_linear_bf16 = mkldnn_utils.to_mkldnn(copy.deepcopy(linear), torch.bfloat16)
if has_bf16_support():
y = mkldnn_linear(x.to_mkldnn()).to_dense()
y_bf16 = mkldnn_linear_bf16(x_bf16.to_mkldnn()).to_dense(torch.float32)
self.assertEqual(y, y_bf16, atol=1e-1, rtol=1e-3)
else:
msg = "mkldnn_linear: bf16 path needs the cpu support avx512bw, avx512vl and avx512dq"
self.assertRaisesRegex(RuntimeError,
msg,
lambda: mkldnn_linear_bf16(x_bf16.to_mkldnn()))
|
def test_linear_lowp(self, dtype):
in_features = torch.randint(3, 10, (1,)).item()
out_features = torch.randint(3, 100, (1,)).item()
x = torch.randn(3, in_features, dtype=torch.float32) * 10
x_lowp = x.to(dtype=dtype)
for bias in [True, False]:
linear = torch.nn.Linear(in_features, out_features, bias=bias).float()
mkldnn_linear = mkldnn_utils.to_mkldnn(copy.deepcopy(linear))
mkldnn_linear_lowp = mkldnn_utils.to_mkldnn(
copy.deepcopy(linear), dtype
)
lowp_support = {
torch.bfloat16: torch.ops.mkldnn._is_mkldnn_bf16_supported,
torch.half: torch.ops.mkldnn._is_mkldnn_fp16_supported,
}
if lowp_support[dtype]():
y = mkldnn_linear(x.to_mkldnn()).to_dense()
y_lowp = mkldnn_linear_lowp(x_lowp.to_mkldnn()).to_dense(
torch.float32
)
if dtype == torch.bfloat16:
self.assertEqual(y, y_lowp, atol=1e-1, rtol=1e-3)
else:
self.assertEqual(y, y_lowp, atol=5e-3, rtol=1e-3)
else:
msg = {
torch.bfloat16: r"bf16 path needs the cpu support avx_ne_convert or avx512bw, avx512vl and avx512dq",
torch.half: r"fp16 path needs the cpu support avx_ne_convert or avx512_fp16",
}
self.assertRaisesRegex(
RuntimeError,
msg[dtype],
lambda: mkldnn_linear_lowp(x_lowp.to_mkldnn()),
)
|
import copy
import itertools
import functools
import unittest
from contextlib import nullcontext
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
import torch
import torch.nn.functional as F
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS, \
skipIfTorchDynamo, xfailIfTorchDynamo
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
dtypes,
)
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
gradgradcheck = functools.partial(gradgradcheck, check_batched_grad=False)
types = [torch.float, torch.bfloat16, torch.half]
@unittest.skipIf(not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled")
class TestMkldnn(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/test_mobile_optimizer.py
|
__init__
|
def __init__(self):
super().__init__()
self.conv_weight = torch.nn.Parameter(torch.rand(conv_weight_shape))
self.conv_bias = torch.nn.Parameter(torch.rand((conv_bias_shape)))
self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape))
self.linear_bias = torch.nn.Parameter(torch.rand((weight_output_dim)))
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
|
def __init__(self) -> None:
super().__init__()
self.conv_weight = torch.nn.Parameter(torch.rand(conv_weight_shape))
self.conv_bias = torch.nn.Parameter(torch.rand(conv_bias_shape))
self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape))
self.linear_bias = torch.nn.Parameter(torch.rand(weight_output_dim))
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
|
import unittest
import torch
import torch.nn as nn
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfNoXNNPACK
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
generate_mobile_module_lints,
optimize_for_mobile,
MobileOptimizerType)
from torch.nn import functional as F
from torch.testing._internal.common_quantized import override_quantized_engine
import torchvision
FileCheck = torch._C.FileCheck
class MyTestModule(torch.nn.Module):
|
import unittest
import torch
import torch.nn as nn
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfNoXNNPACK
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
generate_mobile_module_lints,
optimize_for_mobile,
MobileOptimizerType)
from torch.nn import functional as F
from torch.testing._internal.common_quantized import override_quantized_engine
import torchvision
FileCheck = torch._C.FileCheck
class MyTestModule(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_mobile_optimizer.py
|
fuse_model
|
def fuse_model(self):
torch.ao.quantization.fuse_modules(self, [['conv2', 'relu']], inplace=True)
pass
|
def fuse_model(self):
torch.ao.quantization.fuse_modules(self, [['conv2', 'relu']], inplace=True)
|
import unittest
import torch
import torch.nn as nn
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfNoXNNPACK
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
generate_mobile_module_lints,
optimize_for_mobile,
MobileOptimizerType)
from torch.nn import functional as F
from torch.testing._internal.common_quantized import override_quantized_engine
import torchvision
FileCheck = torch._C.FileCheck
class Standalone(nn.Module):
|
import unittest
import torch
import torch.nn as nn
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfNoXNNPACK
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
generate_mobile_module_lints,
optimize_for_mobile,
MobileOptimizerType)
from torch.nn import functional as F
from torch.testing._internal.common_quantized import override_quantized_engine
import torchvision
FileCheck = torch._C.FileCheck
class Standalone(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_mobile_optimizer.py
|
__init__
|
def __init__(self):
super().__init__()
self.conv_weight = torch.nn.Parameter(torch.rand(conv_weight_shape))
self.conv_bias = torch.nn.Parameter(torch.rand((conv_bias_shape)))
self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape))
self.linear_bias = torch.nn.Parameter(torch.rand((weight_output_dim)))
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
|
def __init__(self) -> None:
super().__init__()
self.conv_weight = torch.nn.Parameter(torch.rand(conv_weight_shape))
self.conv_bias = torch.nn.Parameter(torch.rand(conv_bias_shape))
self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape))
self.linear_bias = torch.nn.Parameter(torch.rand(weight_output_dim))
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
|
import unittest
import torch
import torch.nn as nn
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfNoXNNPACK
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
generate_mobile_module_lints,
optimize_for_mobile,
MobileOptimizerType)
from torch.nn import functional as F
from torch.testing._internal.common_quantized import override_quantized_engine
import torchvision
FileCheck = torch._C.FileCheck
class MyTestModule(torch.nn.Module):
|
import unittest
import torch
import torch.nn as nn
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfNoXNNPACK
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
generate_mobile_module_lints,
optimize_for_mobile,
MobileOptimizerType)
from torch.nn import functional as F
from torch.testing._internal.common_quantized import override_quantized_engine
import torchvision
FileCheck = torch._C.FileCheck
class MyTestModule(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_mobile_optimizer.py
|
__init__
|
def __init__(self):
super().__init__()
self.conv_weight = torch.nn.Parameter(torch.rand(conv_weight_shape))
self.conv_bias = torch.nn.Parameter(torch.rand((conv_bias_shape)))
self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape))
self.linear_bias = torch.nn.Parameter(torch.rand((weight_output_dim)))
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
|
def __init__(self) -> None:
super().__init__()
self.conv_weight = torch.nn.Parameter(torch.rand(conv_weight_shape))
self.conv_bias = torch.nn.Parameter(torch.rand(conv_bias_shape))
self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape))
self.linear_bias = torch.nn.Parameter(torch.rand(weight_output_dim))
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
|
import unittest
import torch
import torch.nn as nn
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfNoXNNPACK
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
generate_mobile_module_lints,
optimize_for_mobile,
MobileOptimizerType)
from torch.nn import functional as F
from torch.testing._internal.common_quantized import override_quantized_engine
import torchvision
FileCheck = torch._C.FileCheck
class MyTestModule(torch.nn.Module):
|
import unittest
import torch
import torch.nn as nn
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfNoXNNPACK
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
generate_mobile_module_lints,
optimize_for_mobile,
MobileOptimizerType)
from torch.nn import functional as F
from torch.testing._internal.common_quantized import override_quantized_engine
import torchvision
FileCheck = torch._C.FileCheck
class MyTestModule(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_mobile_optimizer.py
|
fuse_model
|
def fuse_model(self):
torch.ao.quantization.fuse_modules(self, [['conv2', 'relu']], inplace=True)
pass
|
def fuse_model(self):
torch.ao.quantization.fuse_modules(self, [['conv2', 'relu']], inplace=True)
|
import unittest
import torch
import torch.nn as nn
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfNoXNNPACK
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
generate_mobile_module_lints,
optimize_for_mobile,
MobileOptimizerType)
from torch.nn import functional as F
from torch.testing._internal.common_quantized import override_quantized_engine
import torchvision
FileCheck = torch._C.FileCheck
class Standalone(nn.Module):
|
import unittest
import torch
import torch.nn as nn
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfNoXNNPACK
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
generate_mobile_module_lints,
optimize_for_mobile,
MobileOptimizerType)
from torch.nn import functional as F
from torch.testing._internal.common_quantized import override_quantized_engine
import torchvision
FileCheck = torch._C.FileCheck
class Standalone(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_mobile_optimizer.py
|
test_clone_module_with_class
|
def test_clone_module_with_class(self):
class MyInnerTestModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.pqr = torch.Tensor([10., 20., 30.])
def forward(self, inputs):
return inputs
@torch.jit.export
def dummy_method_not_cloned(self):
return 20
class MyTestModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.abc = 23
self.pqr = torch.Tensor([1., 2., 3.])
self.inner = MyInnerTestModule()
def forward(self, inputs):
x = self.dummy_method_cloned()
# The call to self.inner.dummy_method_not_cloned should not raise an error
y = self.inner.dummy_method_not_cloned()
# The call to self.inner.pqr should not raise an error
z = self.inner.pqr
return (inputs, x, y, z)
@torch.jit.export
def dummy_method_not_cloned2(self):
# The call to self.inner.dummy_method_not_cloned should not raise an error
y = self.inner.dummy_method_not_cloned()
# The call to self.inner.pqr should not raise an error
z = self.inner.pqr
return self.pqr, self.dummy_method_not_cloned(), y, z
@torch.jit.export
def dummy_method_not_cloned(self):
return None
@torch.jit.export
def dummy_method_cloned(self):
return None
@torch.jit.export
def dummy_method_ref_attr_pqr(self):
return self.pqr, self.inner.pqr
m = torch.jit.script(MyTestModule())
# Check that the methods exist on the original model.
self.assertEqual(hasattr(m, "dummy_method_not_cloned"), True)
self.assertEqual(hasattr(m, "dummy_method_cloned"), True)
self.assertEqual(hasattr(m, "dummy_method_not_cloned2"), True)
self.assertEqual(hasattr(m, "pqr"), True)
# Case-1: Successfully clone, ignoring 2 methods, keeping all attributes.
cloned = torch._C._hack_do_not_use_clone_module_with_class(
m._c,
["dummy_method_not_cloned", "dummy_method_not_cloned2"], # ignored_methods
[], # ignored_attributes
)
# Check that the ignored methods don't exist on the cloned model.
self.assertEqual(hasattr(cloned, "dummy_method_not_cloned"), False)
self.assertEqual(hasattr(cloned, "dummy_method_cloned"), True)
self.assertEqual(hasattr(cloned, "dummy_method_not_cloned2"), False)
self.assertEqual(hasattr(cloned, "pqr"), True)
# Check that the cloned class has a classname that starts with __torch__.
self.assertTrue(
cloned.qualified_name.startswith('__torch__.'),
("Expected the cloned module's name to start with the string "
"'__torch__.', but got: {0}").format(cloned.qualified_name),
)
# Case-2: Successfully clone the module, ignoring the attribute pqr, and the method that references it.
cloned = torch._C._hack_do_not_use_clone_module_with_class(
m._c,
["dummy_method_not_cloned", "dummy_method_not_cloned2", "dummy_method_ref_attr_pqr"],
["pqr"],
)
# Check that the ignored methods don't exist on the cloned model.
self.assertEqual(hasattr(cloned, "dummy_method_not_cloned"), False)
self.assertEqual(hasattr(cloned, "dummy_method_cloned"), True)
self.assertEqual(hasattr(cloned, "dummy_method_not_cloned2"), False)
self.assertEqual(hasattr(cloned, "dummy_method_ref_attr_pqr"), False)
self.assertEqual(hasattr(cloned, "pqr"), False)
# Case-3: The statement below will throw since dummy_method_cloned2 is preserved,
# and references dummy_method_not_cloned, which is not cloned.
with self.assertRaises(RuntimeError):
cloned = torch._C._hack_do_not_use_clone_module_with_class(m._c, ["dummy_method_not_cloned"], [])
# Case-4: The statement below will throw since dummy_method_ref_attr_pqr
# is preserved, and references "pqr", which is not cloned.
with self.assertRaises(RuntimeError):
cloned = torch._C._hack_do_not_use_clone_module_with_class(
m._c,
["dummy_method_not_cloned", "dummy_method_not_cloned2"],
["pqr"],
)
|
def test_clone_module_with_class(self):
class MyInnerTestModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.pqr = torch.Tensor([10., 20., 30.])
def forward(self, inputs):
return inputs
@torch.jit.export
def dummy_method_not_cloned(self):
return 20
class MyTestModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.abc = 23
self.pqr = torch.Tensor([1., 2., 3.])
self.inner = MyInnerTestModule()
def forward(self, inputs):
x = self.dummy_method_cloned()
# The call to self.inner.dummy_method_not_cloned should not raise an error
y = self.inner.dummy_method_not_cloned()
# The call to self.inner.pqr should not raise an error
z = self.inner.pqr
return (inputs, x, y, z)
@torch.jit.export
def dummy_method_not_cloned2(self):
# The call to self.inner.dummy_method_not_cloned should not raise an error
y = self.inner.dummy_method_not_cloned()
# The call to self.inner.pqr should not raise an error
z = self.inner.pqr
return self.pqr, self.dummy_method_not_cloned(), y, z
@torch.jit.export
def dummy_method_not_cloned(self):
return None
@torch.jit.export
def dummy_method_cloned(self):
return None
@torch.jit.export
def dummy_method_ref_attr_pqr(self):
return self.pqr, self.inner.pqr
m = torch.jit.script(MyTestModule())
# Check that the methods exist on the original model.
self.assertEqual(hasattr(m, "dummy_method_not_cloned"), True)
self.assertEqual(hasattr(m, "dummy_method_cloned"), True)
self.assertEqual(hasattr(m, "dummy_method_not_cloned2"), True)
self.assertEqual(hasattr(m, "pqr"), True)
# Case-1: Successfully clone, ignoring 2 methods, keeping all attributes.
cloned = torch._C._hack_do_not_use_clone_module_with_class(
m._c,
["dummy_method_not_cloned", "dummy_method_not_cloned2"], # ignored_methods
[], # ignored_attributes
)
# Check that the ignored methods don't exist on the cloned model.
self.assertEqual(hasattr(cloned, "dummy_method_not_cloned"), False)
self.assertEqual(hasattr(cloned, "dummy_method_cloned"), True)
self.assertEqual(hasattr(cloned, "dummy_method_not_cloned2"), False)
self.assertEqual(hasattr(cloned, "pqr"), True)
# Check that the cloned class has a classname that starts with __torch__.
self.assertTrue(
cloned.qualified_name.startswith('__torch__.'),
("Expected the cloned module's name to start with the string "
f"'__torch__.', but got: {cloned.qualified_name}"),
)
# Case-2: Successfully clone the module, ignoring the attribute pqr, and the method that references it.
cloned = torch._C._hack_do_not_use_clone_module_with_class(
m._c,
["dummy_method_not_cloned", "dummy_method_not_cloned2", "dummy_method_ref_attr_pqr"],
["pqr"],
)
# Check that the ignored methods don't exist on the cloned model.
self.assertEqual(hasattr(cloned, "dummy_method_not_cloned"), False)
self.assertEqual(hasattr(cloned, "dummy_method_cloned"), True)
self.assertEqual(hasattr(cloned, "dummy_method_not_cloned2"), False)
self.assertEqual(hasattr(cloned, "dummy_method_ref_attr_pqr"), False)
self.assertEqual(hasattr(cloned, "pqr"), False)
# Case-3: The statement below will throw since dummy_method_cloned2 is preserved,
# and references dummy_method_not_cloned, which is not cloned.
with self.assertRaises(RuntimeError):
cloned = torch._C._hack_do_not_use_clone_module_with_class(m._c, ["dummy_method_not_cloned"], [])
# Case-4: The statement below will throw since dummy_method_ref_attr_pqr
# is preserved, and references "pqr", which is not cloned.
with self.assertRaises(RuntimeError):
cloned = torch._C._hack_do_not_use_clone_module_with_class(
m._c,
["dummy_method_not_cloned", "dummy_method_not_cloned2"],
["pqr"],
)
|
import unittest
import torch
import torch.nn as nn
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfNoXNNPACK
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
generate_mobile_module_lints,
optimize_for_mobile,
MobileOptimizerType)
from torch.nn import functional as F
from torch.testing._internal.common_quantized import override_quantized_engine
import torchvision
FileCheck = torch._C.FileCheck
class TestOptimizer(TestCase):
|
import unittest
import torch
import torch.nn as nn
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfNoXNNPACK
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
generate_mobile_module_lints,
optimize_for_mobile,
MobileOptimizerType)
from torch.nn import functional as F
from torch.testing._internal.common_quantized import override_quantized_engine
import torchvision
FileCheck = torch._C.FileCheck
class TestOptimizer(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_mobile_optimizer.py
|
__init__
|
def __init__(self):
super().__init__()
self.conv_weight = torch.nn.Parameter(torch.rand(conv_weight_shape))
self.conv_bias = torch.nn.Parameter(torch.rand((conv_bias_shape)))
self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape))
self.linear_bias = torch.nn.Parameter(torch.rand((weight_output_dim)))
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
|
def __init__(self) -> None:
super().__init__()
self.conv_weight = torch.nn.Parameter(torch.rand(conv_weight_shape))
self.conv_bias = torch.nn.Parameter(torch.rand(conv_bias_shape))
self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape))
self.linear_bias = torch.nn.Parameter(torch.rand(weight_output_dim))
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
|
import unittest
import torch
import torch.nn as nn
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfNoXNNPACK
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
generate_mobile_module_lints,
optimize_for_mobile,
MobileOptimizerType)
from torch.nn import functional as F
from torch.testing._internal.common_quantized import override_quantized_engine
import torchvision
FileCheck = torch._C.FileCheck
class MyTestModule(torch.nn.Module):
|
import unittest
import torch
import torch.nn as nn
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfNoXNNPACK
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
generate_mobile_module_lints,
optimize_for_mobile,
MobileOptimizerType)
from torch.nn import functional as F
from torch.testing._internal.common_quantized import override_quantized_engine
import torchvision
FileCheck = torch._C.FileCheck
class MyTestModule(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_mobile_optimizer.py
|
__init__
|
def __init__(self):
super().__init__()
self.conv_weight = torch.nn.Parameter(torch.rand(conv_weight_shape))
self.conv_bias = torch.nn.Parameter(torch.rand((conv_bias_shape)))
self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape))
self.linear_bias = torch.nn.Parameter(torch.rand((weight_output_dim)))
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
|
def __init__(self) -> None:
super().__init__()
self.conv_weight = torch.nn.Parameter(torch.rand(conv_weight_shape))
self.conv_bias = torch.nn.Parameter(torch.rand(conv_bias_shape))
self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape))
self.linear_bias = torch.nn.Parameter(torch.rand(weight_output_dim))
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
|
import unittest
import torch
import torch.nn as nn
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfNoXNNPACK
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
generate_mobile_module_lints,
optimize_for_mobile,
MobileOptimizerType)
from torch.nn import functional as F
from torch.testing._internal.common_quantized import override_quantized_engine
import torchvision
FileCheck = torch._C.FileCheck
class MyTestModule(torch.nn.Module):
|
import unittest
import torch
import torch.nn as nn
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfNoXNNPACK
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
generate_mobile_module_lints,
optimize_for_mobile,
MobileOptimizerType)
from torch.nn import functional as F
from torch.testing._internal.common_quantized import override_quantized_engine
import torchvision
FileCheck = torch._C.FileCheck
class MyTestModule(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_model_dump.py
|
__init__
|
def __init__(self):
super().__init__()
self.layer1 = torch.nn.Linear(16, 64)
self.relu1 = torch.nn.ReLU()
self.layer2 = torch.nn.Linear(64, 8)
self.relu2 = torch.nn.ReLU()
|
def __init__(self) -> None:
super().__init__()
self.layer1 = torch.nn.Linear(16, 64)
self.relu1 = torch.nn.ReLU()
self.layer2 = torch.nn.Linear(64, 8)
self.relu2 = torch.nn.ReLU()
|
import os
import io
import functools
import tempfile
import urllib
import unittest
import torch
import torch.backends.xnnpack
import torch.utils.model_dump
import torch.utils.mobile_optimizer
from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS, skipIfNoXNNPACK
from torch.testing._internal.common_quantized import supported_qengines
class SimpleModel(torch.nn.Module):
from selenium import webdriver
|
import os
import io
import functools
import tempfile
import urllib
import unittest
import torch
import torch.backends.xnnpack
import torch.utils.model_dump
import torch.utils.mobile_optimizer
from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS, skipIfNoXNNPACK
from torch.testing._internal.common_quantized import supported_qengines
class SimpleModel(torch.nn.Module):
from selenium import webdriver
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_mobile_optimizer.py
|
__init__
|
def __init__(self):
super().__init__()
self.conv_weight = torch.nn.Parameter(torch.rand(conv_weight_shape))
self.conv_bias = torch.nn.Parameter(torch.rand((conv_bias_shape)))
self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape))
self.linear_bias = torch.nn.Parameter(torch.rand((weight_output_dim)))
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
|
def __init__(self) -> None:
super().__init__()
self.conv_weight = torch.nn.Parameter(torch.rand(conv_weight_shape))
self.conv_bias = torch.nn.Parameter(torch.rand(conv_bias_shape))
self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape))
self.linear_bias = torch.nn.Parameter(torch.rand(weight_output_dim))
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
|
import unittest
import torch
import torch.nn as nn
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfNoXNNPACK
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
generate_mobile_module_lints,
optimize_for_mobile,
MobileOptimizerType)
from torch.nn import functional as F
from torch.testing._internal.common_quantized import override_quantized_engine
import torchvision
FileCheck = torch._C.FileCheck
class MyTestModule(torch.nn.Module):
|
import unittest
import torch
import torch.nn as nn
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfNoXNNPACK
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
generate_mobile_module_lints,
optimize_for_mobile,
MobileOptimizerType)
from torch.nn import functional as F
from torch.testing._internal.common_quantized import override_quantized_engine
import torchvision
FileCheck = torch._C.FileCheck
class MyTestModule(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_mobile_optimizer.py
|
__init__
|
def __init__(self):
super().__init__()
self.conv_weight = torch.nn.Parameter(torch.rand(conv_weight_shape))
self.conv_bias = torch.nn.Parameter(torch.rand((conv_bias_shape)))
self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape))
self.linear_bias = torch.nn.Parameter(torch.rand((weight_output_dim)))
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
|
def __init__(self) -> None:
super().__init__()
self.conv_weight = torch.nn.Parameter(torch.rand(conv_weight_shape))
self.conv_bias = torch.nn.Parameter(torch.rand(conv_bias_shape))
self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape))
self.linear_bias = torch.nn.Parameter(torch.rand(weight_output_dim))
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
|
import unittest
import torch
import torch.nn as nn
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfNoXNNPACK
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
generate_mobile_module_lints,
optimize_for_mobile,
MobileOptimizerType)
from torch.nn import functional as F
from torch.testing._internal.common_quantized import override_quantized_engine
import torchvision
FileCheck = torch._C.FileCheck
class MyTestModule(torch.nn.Module):
|
import unittest
import torch
import torch.nn as nn
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfNoXNNPACK
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
generate_mobile_module_lints,
optimize_for_mobile,
MobileOptimizerType)
from torch.nn import functional as F
from torch.testing._internal.common_quantized import override_quantized_engine
import torchvision
FileCheck = torch._C.FileCheck
class MyTestModule(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_mobile_optimizer.py
|
__init__
|
def __init__(self):
super().__init__()
self.conv_weight = torch.nn.Parameter(torch.rand(conv_weight_shape))
self.conv_bias = torch.nn.Parameter(torch.rand((conv_bias_shape)))
self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape))
self.linear_bias = torch.nn.Parameter(torch.rand((weight_output_dim)))
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
|
def __init__(self) -> None:
super().__init__()
self.conv_weight = torch.nn.Parameter(torch.rand(conv_weight_shape))
self.conv_bias = torch.nn.Parameter(torch.rand(conv_bias_shape))
self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape))
self.linear_bias = torch.nn.Parameter(torch.rand(weight_output_dim))
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
|
import unittest
import torch
import torch.nn as nn
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfNoXNNPACK
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
generate_mobile_module_lints,
optimize_for_mobile,
MobileOptimizerType)
from torch.nn import functional as F
from torch.testing._internal.common_quantized import override_quantized_engine
import torchvision
FileCheck = torch._C.FileCheck
class MyTestModule(torch.nn.Module):
|
import unittest
import torch
import torch.nn as nn
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfNoXNNPACK
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
generate_mobile_module_lints,
optimize_for_mobile,
MobileOptimizerType)
from torch.nn import functional as F
from torch.testing._internal.common_quantized import override_quantized_engine
import torchvision
FileCheck = torch._C.FileCheck
class MyTestModule(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_mobile_optimizer.py
|
__init__
|
def __init__(self):
super().__init__()
self.conv_weight = torch.nn.Parameter(torch.rand(conv_weight_shape))
self.conv_bias = torch.nn.Parameter(torch.rand((conv_bias_shape)))
self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape))
self.linear_bias = torch.nn.Parameter(torch.rand((weight_output_dim)))
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
|
def __init__(self) -> None:
super().__init__()
self.conv_weight = torch.nn.Parameter(torch.rand(conv_weight_shape))
self.conv_bias = torch.nn.Parameter(torch.rand(conv_bias_shape))
self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape))
self.linear_bias = torch.nn.Parameter(torch.rand(weight_output_dim))
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
|
import unittest
import torch
import torch.nn as nn
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfNoXNNPACK
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
generate_mobile_module_lints,
optimize_for_mobile,
MobileOptimizerType)
from torch.nn import functional as F
from torch.testing._internal.common_quantized import override_quantized_engine
import torchvision
FileCheck = torch._C.FileCheck
class MyTestModule(torch.nn.Module):
|
import unittest
import torch
import torch.nn as nn
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfNoXNNPACK
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
generate_mobile_module_lints,
optimize_for_mobile,
MobileOptimizerType)
from torch.nn import functional as F
from torch.testing._internal.common_quantized import override_quantized_engine
import torchvision
FileCheck = torch._C.FileCheck
class MyTestModule(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_mobile_optimizer.py
|
__init__
|
def __init__(self):
super().__init__()
self.conv_weight = torch.nn.Parameter(torch.rand(conv_weight_shape))
self.conv_bias = torch.nn.Parameter(torch.rand((conv_bias_shape)))
self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape))
self.linear_bias = torch.nn.Parameter(torch.rand((weight_output_dim)))
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
|
def __init__(self) -> None:
super().__init__()
self.conv_weight = torch.nn.Parameter(torch.rand(conv_weight_shape))
self.conv_bias = torch.nn.Parameter(torch.rand(conv_bias_shape))
self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape))
self.linear_bias = torch.nn.Parameter(torch.rand(weight_output_dim))
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
|
import unittest
import torch
import torch.nn as nn
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfNoXNNPACK
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
generate_mobile_module_lints,
optimize_for_mobile,
MobileOptimizerType)
from torch.nn import functional as F
from torch.testing._internal.common_quantized import override_quantized_engine
import torchvision
FileCheck = torch._C.FileCheck
class MyTestModule(torch.nn.Module):
|
import unittest
import torch
import torch.nn as nn
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfNoXNNPACK
from torch.testing._internal.jit_utils import get_forward, get_forward_graph
from torch.utils.mobile_optimizer import (LintCode,
generate_mobile_module_lints,
optimize_for_mobile,
MobileOptimizerType)
from torch.nn import functional as F
from torch.testing._internal.common_quantized import override_quantized_engine
import torchvision
FileCheck = torch._C.FileCheck
class MyTestModule(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_model_dump.py
|
__init__
|
def __init__(self):
super().__init__()
self.layer1 = torch.nn.Linear(16, 64)
self.relu1 = torch.nn.ReLU()
self.layer2 = torch.nn.Linear(64, 8)
self.relu2 = torch.nn.ReLU()
|
def __init__(self) -> None:
super().__init__()
self.layer1 = torch.nn.Linear(16, 64)
self.relu1 = torch.nn.ReLU()
self.layer2 = torch.nn.Linear(64, 8)
self.relu2 = torch.nn.ReLU()
|
import os
import io
import functools
import tempfile
import urllib
import unittest
import torch
import torch.backends.xnnpack
import torch.utils.model_dump
import torch.utils.mobile_optimizer
from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS, skipIfNoXNNPACK
from torch.testing._internal.common_quantized import supported_qengines
class SimpleModel(torch.nn.Module):
from selenium import webdriver
|
import os
import io
import functools
import tempfile
import urllib
import unittest
import torch
import torch.backends.xnnpack
import torch.utils.model_dump
import torch.utils.mobile_optimizer
from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS, skipIfNoXNNPACK
from torch.testing._internal.common_quantized import supported_qengines
class SimpleModel(torch.nn.Module):
from selenium import webdriver
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_model_exports_to_core_aten.py
|
_get_ops_list
|
def _get_ops_list(m: torch.fx.GraphModule):
op_list = []
for n in m.graph.nodes:
if n.op == "call_function":
op_list.append(n.target)
return op_list
class TestQuantizePT2EModels(TestCase):
@pytest.mark.xfail
@skip_if_no_torchvision
def test_vit_aten_export(self):
from torchvision.models import vit_b_16 # @manual
m = vit_b_16(weights="IMAGENET1K_V1")
m = m.eval()
input_shape = (1, 3, 224, 224)
example_inputs = (torch.randn(input_shape),)
m = export.capture_pre_autograd_graph(m, copy.deepcopy(example_inputs))
m(*example_inputs)
m = export.export(m, copy.deepcopy(example_inputs))
ops = _get_ops_list(m.graph_module)
non_core_aten_op_found = False
for op in ops:
if "scaled_dot_product" in str(op):
non_core_aten_op_found = True
self.assertFalse(non_core_aten_op_found)
if __name__ == "__main__":
from torch.testing._internal.common_utils import run_tests
run_tests()
|
import copy
import pytest
import torch
import torch._export as export
from torch.testing._internal.common_quantization import skip_if_no_torchvision
from torch.testing._internal.common_utils import TestCase
from torchvision.models import vit_b_16 # @manual
from torch.testing._internal.common_utils import run_tests
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_model_exports_to_core_aten.py
|
test_vit_aten_export
|
def test_vit_aten_export(self):
from torchvision.models import vit_b_16 # @manual
m = vit_b_16(weights="IMAGENET1K_V1")
m = m.eval()
input_shape = (1, 3, 224, 224)
example_inputs = (torch.randn(input_shape),)
m = export.capture_pre_autograd_graph(m, copy.deepcopy(example_inputs))
m(*example_inputs)
m = export.export(m, copy.deepcopy(example_inputs))
ops = _get_ops_list(m.graph_module)
non_core_aten_op_found = False
for op in ops:
if "scaled_dot_product" in str(op):
non_core_aten_op_found = True
self.assertFalse(non_core_aten_op_found)
|
import copy
import pytest
import torch
import torch._export as export
from torch.testing._internal.common_quantization import skip_if_no_torchvision
from torch.testing._internal.common_utils import TestCase
class TestQuantizePT2EModels(TestCase):
from torchvision.models import vit_b_16 # @manual
from torch.testing._internal.common_utils import run_tests
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_module_init.py
|
build_constructor_arg_db
|
def build_constructor_arg_db():
return {
torch.nn.AdaptiveAvgPool1d: ((5,), {}),
torch.nn.AdaptiveAvgPool2d: ((5,), {}),
torch.nn.AdaptiveAvgPool3d: ((5,), {}),
torch.nn.AdaptiveLogSoftmaxWithLoss: ((100, 20, [5, 10, 15]), {}),
torch.nn.AdaptiveMaxPool1d: ((5,), {}),
torch.nn.AdaptiveMaxPool2d: ((5,), {}),
torch.nn.AdaptiveMaxPool3d: ((5,), {}),
torch.nn.AlphaDropout: ((), {}),
torch.nn.AvgPool1d: ((3,), {}),
torch.nn.AvgPool2d: ((3,), {}),
torch.nn.AvgPool3d: ((3,), {}),
torch.nn.BCELoss: ((), {}),
torch.nn.BCEWithLogitsLoss: ((), {}),
torch.nn.BatchNorm1d: ((5,), {}),
torch.nn.BatchNorm2d: ((5,), {}),
torch.nn.BatchNorm3d: ((5,), {}),
torch.nn.Bilinear: ((2, 3, 4), {}),
torch.nn.CELU: ((), {}),
torch.nn.CTCLoss: ((), {}),
torch.nn.ChannelShuffle: ((4,), {}),
torch.nn.ConstantPad1d: ((2, 3.5), {}),
torch.nn.ConstantPad2d: ((2, 3.5), {}),
torch.nn.ConstantPad3d: ((2, 3.5), {}),
torch.nn.Conv1d: ((3, 3, 3), {}),
torch.nn.Conv2d: ((3, 3, 3), {}),
torch.nn.Conv3d: ((3, 3, 3), {}),
torch.nn.ConvTranspose1d: ((3, 3, 3), {}),
torch.nn.ConvTranspose2d: ((3, 3, 3), {}),
torch.nn.ConvTranspose3d: ((3, 3, 3), {}),
torch.nn.CosineEmbeddingLoss: ((), {}),
torch.nn.CosineSimilarity: ((), {}),
torch.nn.CrossEntropyLoss: ((), {}),
torch.nn.CrossMapLRN2d: ((5,), {}),
torch.nn.Dropout1d: ((), {}),
torch.nn.Dropout2d: ((), {}),
torch.nn.Dropout3d: ((), {}),
torch.nn.Dropout: ((), {}),
torch.nn.ELU: ((), {}),
torch.nn.Embedding: ((10, 5), {}),
torch.nn.EmbeddingBag: ((10, 5), {}),
torch.nn.FeatureAlphaDropout: ((), {}),
torch.nn.Flatten: ((), {}),
torch.nn.Fold: ((5, 2), {}),
torch.nn.FractionalMaxPool2d: ((5, 2), {}),
torch.nn.FractionalMaxPool3d: ((5, 2), {}),
torch.nn.GELU: ((), {}),
torch.nn.GLU: ((), {}),
torch.nn.GRU: ((5, 10), {}),
torch.nn.GRUCell: ((5, 10), {}),
torch.nn.GaussianNLLLoss: ((), {}),
torch.nn.GroupNorm: ((3, 6, 1e-5, True), {}),
torch.nn.Hardshrink: ((), {}),
torch.nn.Hardsigmoid: ((), {}),
torch.nn.Hardswish: ((), {}),
torch.nn.Hardtanh: ((), {}),
torch.nn.HingeEmbeddingLoss: ((), {}),
torch.nn.HuberLoss: ((), {}),
torch.nn.Identity: ((), {}),
torch.nn.InstanceNorm1d: ((5, 1e-5, 0.1, True), {}),
torch.nn.InstanceNorm2d: ((5, 1e-5, 0.1, True), {}),
torch.nn.InstanceNorm3d: ((5, 1e-5, 0.1, True), {}),
torch.nn.KLDivLoss: ((), {}),
torch.nn.L1Loss: ((), {}),
torch.nn.LPPool1d: ((2, 3), {}),
torch.nn.LPPool2d: ((2, 3), {}),
torch.nn.LSTM: ((5, 10), {}),
torch.nn.LSTMCell: ((5, 10), {}),
torch.nn.LayerNorm: ((2,), {}),
torch.nn.LazyBatchNorm1d: ((), {}),
torch.nn.LazyBatchNorm2d: ((), {}),
torch.nn.LazyBatchNorm3d: ((), {}),
torch.nn.LazyConv1d: ((5, 2), {}),
torch.nn.LazyConv2d: ((5, 2), {}),
torch.nn.LazyConv3d: ((5, 2), {}),
torch.nn.LazyConvTranspose1d: ((5, 2), {}),
torch.nn.LazyConvTranspose2d: ((5, 2), {}),
torch.nn.LazyConvTranspose3d: ((5, 2), {}),
torch.nn.LazyInstanceNorm1d: ((), {}),
torch.nn.LazyInstanceNorm2d: ((), {}),
torch.nn.LazyInstanceNorm3d: ((), {}),
torch.nn.LazyLinear: ((5,), {}),
torch.nn.LeakyReLU: ((), {}),
torch.nn.Linear: ((10, 5), {}),
torch.nn.LocalResponseNorm: ((2,), {}),
torch.nn.LogSigmoid: ((), {}),
torch.nn.LogSoftmax: ((), {}),
torch.nn.MSELoss: ((), {}),
torch.nn.MarginRankingLoss: ((), {}),
torch.nn.MaxPool1d: ((3,), {}),
torch.nn.MaxPool2d: ((3,), {}),
torch.nn.MaxPool3d: ((3,), {}),
torch.nn.MaxUnpool1d: ((5,), {}),
torch.nn.MaxUnpool2d: ((5,), {}),
torch.nn.MaxUnpool3d: ((5,), {}),
torch.nn.Mish: ((), {}),
torch.nn.ModuleDict: ((), {}),
torch.nn.ModuleList: ((), {}),
torch.nn.MultiLabelMarginLoss: ((), {}),
torch.nn.MultiLabelSoftMarginLoss: ((), {}),
torch.nn.MultiMarginLoss: ((), {}),
torch.nn.MultiheadAttention: ((100, 2), {}),
torch.nn.NLLLoss2d: ((), {}),
torch.nn.NLLLoss: ((), {}),
torch.nn.PReLU: ((), {}),
torch.nn.PairwiseDistance: ((), {}),
torch.nn.ParameterDict: ((), {}),
torch.nn.ParameterList: ((), {}),
torch.nn.PixelShuffle: ((2,), {}),
torch.nn.PixelUnshuffle: ((2,), {}),
torch.nn.PoissonNLLLoss: ((), {}),
torch.nn.RNN: ((5, 10), {}),
torch.nn.RNNBase: (('LSTM', 5, 10), {}),
torch.nn.RNNCell: ((5, 10), {}),
torch.nn.RNNCellBase: ((5, 10, True, 2), {}),
torch.nn.RReLU: ((), {}),
torch.nn.ReLU6: ((), {}),
torch.nn.ReLU: ((), {}),
torch.nn.ReflectionPad1d: ((2,), {}),
torch.nn.ReflectionPad2d: ((2,), {}),
torch.nn.ReflectionPad3d: ((2,), {}),
torch.nn.ReplicationPad1d: ((2,), {}),
torch.nn.ReplicationPad2d: ((2,), {}),
torch.nn.ReplicationPad3d: ((2,), {}),
torch.nn.SELU: ((), {}),
torch.nn.Sequential: ((), {}),
torch.nn.SiLU: ((), {}),
torch.nn.Sigmoid: ((), {}),
torch.nn.SmoothL1Loss: ((), {}),
torch.nn.SoftMarginLoss: ((), {}),
torch.nn.Softmax2d: ((), {}),
torch.nn.Softmax: ((), {}),
torch.nn.Softmin: ((), {}),
torch.nn.Softplus: ((), {}),
torch.nn.Softshrink: ((), {}),
torch.nn.Softsign: ((), {}),
torch.nn.SyncBatchNorm: ((5,), {}),
torch.nn.Tanh: ((), {}),
torch.nn.Tanhshrink: ((), {}),
torch.nn.Threshold: ((0.1, 20), {}),
torch.nn.Transformer: ((), {}),
torch.nn.TransformerDecoder: ((torch.nn.TransformerDecoderLayer, 3), {}),
torch.nn.TransformerDecoderLayer: ((10, 2), {}),
torch.nn.TransformerEncoder: ((torch.nn.TransformerEncoderLayer, 3), {}),
torch.nn.TransformerEncoderLayer: ((10, 2), {}),
torch.nn.TripletMarginLoss: ((), {}),
torch.nn.TripletMarginWithDistanceLoss: ((), {}),
torch.nn.Unflatten: ((1, (2, 5, 5)), {}),
torch.nn.Unfold: ((3,), {}),
torch.nn.Upsample: ((), {}),
torch.nn.UpsamplingBilinear2d: ((), {}),
torch.nn.UpsamplingNearest2d: ((), {}),
torch.nn.ZeroPad2d: ((0,), {}),
torch.ao.nn.qat.Conv1d: ((3, 3, 3), {
'qconfig': torch.ao.quantization.default_qconfig,
}),
torch.ao.nn.qat.Conv2d: ((3, 3, 3), {
'qconfig': torch.ao.quantization.default_qconfig,
}),
torch.ao.nn.qat.Conv3d: ((3, 3, 3), {
'qconfig': torch.ao.quantization.default_qconfig,
}),
torch.ao.nn.qat.Linear: ((5, 2), {
'qconfig': torch.ao.quantization.default_qconfig,
}),
torch.ao.nn.qat.Embedding: ((10, 12), {
'qconfig': torch.ao.quantization.float_qparams_weight_only_qconfig,
}),
torch.ao.nn.qat.EmbeddingBag: ((10, 12), {
'qconfig': torch.ao.quantization.float_qparams_weight_only_qconfig,
}),
torch.ao.nn.quantizable.LSTM: ((5, 6), {}),
torch.ao.nn.quantizable.LSTMCell: ((5, 6), {}),
torch.ao.nn.quantizable.MultiheadAttention: ((10, 2), {}),
torch.ao.nn.quantized.BatchNorm2d: ((2,), {}),
torch.ao.nn.quantized.BatchNorm3d: ((2,), {}),
torch.ao.nn.quantized.Dropout: ((), {}),
torch.ao.nn.quantized.Conv1d: ((3, 3, 3), {}),
torch.ao.nn.quantized.Conv2d: ((3, 3, 3), {}),
torch.ao.nn.quantized.Conv3d: ((3, 3, 3), {}),
torch.ao.nn.quantized.ConvTranspose1d: ((3, 3, 3), {}),
torch.ao.nn.quantized.ConvTranspose2d: ((3, 3, 3), {}),
torch.ao.nn.quantized.ConvTranspose3d: ((16, 33, (3, 3, 5)), {
'stride': (2, 1, 1),
'padding': (4, 2, 2),
'output_padding': (2, 2, 2),
'dilation': (1, 1, 1),
}),
torch.ao.nn.quantized.DeQuantize: ((), {}),
torch.ao.nn.quantized.ELU: ((0.01, 0), {}),
torch.ao.nn.quantized.Embedding: ((10, 3), {
'factory_kwargs': {},
}),
torch.ao.nn.quantized.EmbeddingBag: ((10, 3), {
'factory_kwargs': {},
}),
torch.ao.nn.quantized.GroupNorm: ((2, 4, torch.nn.Parameter(torch.tensor(2.)),
torch.nn.Parameter(torch.tensor(2.)), 0.1, 0), {}),
torch.ao.nn.quantized.Hardswish: ((0.1, 0,), {}),
torch.ao.nn.quantized.InstanceNorm1d: ((2, torch.nn.Parameter(torch.tensor(2.)),
torch.nn.Parameter(torch.tensor(2.)), 0.1, 0), {}),
torch.ao.nn.quantized.InstanceNorm2d: ((2, torch.nn.Parameter(torch.tensor(2.)),
torch.nn.Parameter(torch.tensor(2.)), 0.1, 0), {}),
torch.ao.nn.quantized.InstanceNorm3d: ((2, torch.nn.Parameter(torch.tensor(2.)),
torch.nn.Parameter(torch.tensor(2.)), 0.1, 0), {}),
torch.ao.nn.quantized.LayerNorm: ((2, torch.nn.Parameter(torch.tensor(2.)),
torch.nn.Parameter(torch.tensor(2.)), 0.1, 0), {}),
torch.ao.nn.quantized.LeakyReLU: ((0.01, 0), {}),
torch.ao.nn.quantized.Linear: ((5, 2), {
'factory_kwargs': {},
}),
torch.ao.nn.quantized.MaxPool2d: ((3,), {}),
torch.ao.nn.quantized.Quantize: ((0.1, 0), {
'dtype': torch.int16,
'factory_kwargs': {},
}),
torch.ao.nn.quantized.ReLU6: ((), {}),
torch.ao.nn.quantized.Sigmoid: ((0.1, 0), {}),
torch.ao.nn.quantized.Softmax: ((), {}),
torch.ao.nn.quantized.FloatFunctional: ((), {}),
torch.ao.nn.quantized.FXFloatFunctional: ((), {}),
torch.ao.nn.quantized.QFunctional: ((), {}),
# Remove torch.ao.nn.quantized after the migration completes:
torch.ao.nn.qat.Conv1d: ((3, 3, 3), {
'qconfig': torch.ao.quantization.default_qconfig,
}),
torch.ao.nn.qat.Conv2d: ((3, 3, 3), {
'qconfig': torch.ao.quantization.default_qconfig,
}),
torch.ao.nn.qat.Conv3d: ((3, 3, 3), {
'qconfig': torch.ao.quantization.default_qconfig,
}),
torch.ao.nn.qat.Linear: ((5, 2), {
'qconfig': torch.ao.quantization.default_qconfig,
}),
torch.ao.nn.qat.Embedding: ((10, 12), {
'qconfig': torch.ao.quantization.float_qparams_weight_only_qconfig,
}),
torch.ao.nn.qat.EmbeddingBag: ((10, 12), {
'qconfig': torch.ao.quantization.float_qparams_weight_only_qconfig,
}),
torch.ao.nn.quantized.BatchNorm2d: ((2,), {}),
torch.ao.nn.quantized.BatchNorm3d: ((2,), {}),
torch.ao.nn.quantized.Dropout: ((), {}),
torch.ao.nn.quantized.Conv1d: ((3, 3, 3), {}),
torch.ao.nn.quantized.Conv2d: ((3, 3, 3), {}),
torch.ao.nn.quantized.Conv3d: ((3, 3, 3), {}),
torch.ao.nn.quantized.ConvTranspose1d: ((3, 3, 3), {}),
torch.ao.nn.quantized.ConvTranspose2d: ((3, 3, 3), {}),
torch.ao.nn.quantized.ConvTranspose3d: ((16, 33, (3, 3, 5)), {
'stride': (2, 1, 1),
'padding': (4, 2, 2),
'output_padding': (2, 2, 2),
'dilation': (1, 1, 1),
}),
torch.ao.nn.quantized.DeQuantize: ((), {}),
torch.ao.nn.quantized.ELU: ((0.01, 0), {}),
torch.ao.nn.quantized.Embedding: ((10, 3), {
'factory_kwargs': {},
}),
torch.ao.nn.quantized.EmbeddingBag: ((10, 3), {
'factory_kwargs': {},
}),
torch.ao.nn.quantized.GroupNorm: ((2, 4, torch.nn.Parameter(torch.tensor(2.)),
torch.nn.Parameter(torch.tensor(2.)), 0.1, 0), {}),
torch.ao.nn.quantized.Hardswish: ((0.1, 0,), {}),
torch.ao.nn.quantized.InstanceNorm1d: ((2, torch.nn.Parameter(torch.tensor(2.)),
torch.nn.Parameter(torch.tensor(2.)), 0.1, 0), {}),
torch.ao.nn.quantized.InstanceNorm2d: ((2, torch.nn.Parameter(torch.tensor(2.)),
torch.nn.Parameter(torch.tensor(2.)), 0.1, 0), {}),
torch.ao.nn.quantized.InstanceNorm3d: ((2, torch.nn.Parameter(torch.tensor(2.)),
torch.nn.Parameter(torch.tensor(2.)), 0.1, 0), {}),
torch.ao.nn.quantized.LayerNorm: ((2, torch.nn.Parameter(torch.tensor(2.)),
torch.nn.Parameter(torch.tensor(2.)), 0.1, 0), {}),
torch.ao.nn.quantized.LeakyReLU: ((0.01, 0), {}),
torch.ao.nn.quantized.Linear: ((5, 2), {
'factory_kwargs': {},
}),
torch.ao.nn.quantized.MaxPool2d: ((3,), {}),
torch.ao.nn.quantized.PReLU: ((0.01, 0), {}),
torch.ao.nn.quantized.Quantize: ((0.1, 0), {
'dtype': torch.int16,
'factory_kwargs': {},
}),
torch.ao.nn.quantized.ReLU6: ((), {}),
torch.ao.nn.quantized.Sigmoid: ((0.1, 0), {}),
torch.ao.nn.quantized.Softmax: ((), {}),
torch.ao.nn.quantized.FloatFunctional: ((), {}),
torch.ao.nn.quantized.FXFloatFunctional: ((), {}),
torch.ao.nn.quantized.QFunctional: ((), {}),
}
# Instantiates the given class with the given args, kwargs, optionally on a given device.
|
import inspect
import torch
from unittest import mock
from unittest.mock import MagicMock, patch
from torch.testing._internal.common_dtype import floating_types
from torch.testing._internal.common_device_type import instantiate_device_type_tests, dtypes
from torch.testing._internal.common_quantization import skipIfNoFBGEMM
from torch.testing._internal.common_utils import TestCase, run_tests
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_module_init.py
|
instantiate_class
|
def instantiate_class(cls, args, kwargs, extra_kwargs):
return cls(*args, **kwargs) if extra_kwargs is None else cls(*args, **kwargs, **extra_kwargs)
# Returns a function that calls the real implementation of a method
# in addition to passing args to a mock object.
|
import inspect
import torch
from unittest import mock
from unittest.mock import MagicMock, patch
from torch.testing._internal.common_dtype import floating_types
from torch.testing._internal.common_device_type import instantiate_device_type_tests, dtypes
from torch.testing._internal.common_quantization import skipIfNoFBGEMM
from torch.testing._internal.common_utils import TestCase, run_tests
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_module_init.py
|
mock_wrapper
|
def mock_wrapper(method):
mock = MagicMock()
def wrapper(self, *args, **kwargs):
mock(*args, **kwargs)
return method(self, *args, **kwargs)
wrapper.mock = mock
return wrapper
# Returns a set of args / kwargs that can be used to construct the module.
|
import inspect
import torch
from unittest import mock
from unittest.mock import MagicMock, patch
from torch.testing._internal.common_dtype import floating_types
from torch.testing._internal.common_device_type import instantiate_device_type_tests, dtypes
from torch.testing._internal.common_quantization import skipIfNoFBGEMM
from torch.testing._internal.common_utils import TestCase, run_tests
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_module_init.py
|
get_example_args
|
def get_example_args(module_cls, constructor_arg_db, extra_kwargs=None):
assert module_cls in constructor_arg_db, \
f"No entry for {module_cls} in the constructor arg DB. Please add it to pass these tests."
args, kwargs = constructor_arg_db[module_cls]
extra_kwargs = {} if extra_kwargs is None else extra_kwargs
# Recursively instantiate args / kwargs that are class objects.
args = [instantiate_class(arg, *get_example_args(arg, constructor_arg_db), extra_kwargs=extra_kwargs)
if inspect.isclass(arg) else torch.nn.Parameter(arg.to(**extra_kwargs))
if isinstance(arg, torch.nn.Parameter) else arg for arg in args]
kwargs = {k: instantiate_class(v, *get_example_args(v, constructor_arg_db), extra_kwargs=extra_kwargs)
if inspect.isclass(v) else torch.nn.Parameter(v.to(*extra_kwargs))
if isinstance(v, torch.nn.Parameter) else v for k, v in kwargs.items()}
kwargs.update(extra_kwargs)
return args, kwargs
|
import inspect
import torch
from unittest import mock
from unittest.mock import MagicMock, patch
from torch.testing._internal.common_dtype import floating_types
from torch.testing._internal.common_device_type import instantiate_device_type_tests, dtypes
from torch.testing._internal.common_quantization import skipIfNoFBGEMM
from torch.testing._internal.common_utils import TestCase, run_tests
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_module_init.py
|
run_test
|
def run_test(test_cls, device, dtype, module_cls=module_cls):
# Check if this module creates parameters or registers buffers.
# The mock magic here passes through to the real Parameter / register_buffer
# logic and is only used to check for calls.
args, kwargs = get_example_args(module_cls, constructor_arg_db)
# Some modules need to pass factory_kwargs so as not to conflict with existing args such as dtype.
module_needs_factory_kwargs = 'factory_kwargs' in kwargs
if module_needs_factory_kwargs:
del kwargs['factory_kwargs']
extra_kwargs = {
'factory_kwargs': {
'device': device,
'dtype': dtype,
}
}
else:
extra_kwargs = {
'device': device,
'dtype': dtype,
}
parameter_new = mock_wrapper(torch.nn.Parameter.__new__)
with patch.object(torch.nn.Parameter, '__new__', parameter_new):
register_buffer = mock_wrapper(torch.nn.Module.register_buffer)
with patch.object(torch.nn.Module, 'register_buffer', register_buffer):
m = module_cls(*args, **kwargs)
module_creates_params_or_buffers = parameter_new.mock.called or register_buffer.mock.called
# == Verify factory kwargs are supported. ==
if verify_kwargs and module_creates_params_or_buffers:
args, kwargs = get_example_args(module_cls, constructor_arg_db,
extra_kwargs=extra_kwargs)
if module_is_lazy:
# Ensure device and dtype are passed to all UninitializedParameters and UninitializedBuffers.
uninit_param_new = mock_wrapper(torch.nn.UninitializedParameter.__new__)
with patch.object(torch.nn.UninitializedParameter, '__new__', uninit_param_new):
uninit_buffer_new = mock_wrapper(torch.nn.UninitializedBuffer.__new__)
with patch.object(torch.nn.UninitializedBuffer, '__new__', uninit_buffer_new):
m = module_cls(*args, **kwargs)
uninit_param_new.mock.assert_has_calls(
[mock.call(device=device, dtype=dtype) for _ in uninit_param_new.mock.mock_calls])
uninit_buffer_new.mock.assert_has_calls(
[mock.call(device=device, dtype=dtype) for _ in uninit_buffer_new.mock.mock_calls])
else:
# Check device placement and dtype for parameters and buffers.
# Only verify floating point dtypes since that's what the kwarg applies to.
# Note that dtype verification is also skipped if the module requires factory_kwargs.
m = module_cls(*args, **kwargs)
for name, param in m.named_parameters():
test_cls.assertEqual(
str(param.device), device,
f'Parameter {name} is on {param.device.type} instead of the expected device {device}')
if param.dtype.is_floating_point and not module_needs_factory_kwargs:
test_cls.assertEqual(
param.dtype, dtype,
f'Parameter {name} is of dtype {param.dtype} instead of the expected dtype {dtype}')
for name, buffer in m.named_buffers():
test_cls.assertEqual(
str(buffer.device), device,
f'Buffer {name} is on {buffer.device.type} instead of the expected device {device}')
if buffer.dtype.is_floating_point and not module_needs_factory_kwargs:
test_cls.assertEqual(
buffer.dtype, dtype,
f'Buffer {name} is of dtype {buffer.dtype} instead of the expected dtype {dtype}')
# == Verify passing a nonexistent arg errors out. ==
if check_nonexistent_arg:
with test_cls.assertRaises(TypeError):
m = module_cls(*args, **kwargs, nonexistent_arg='foo')
return run_test
|
import inspect
import torch
from unittest import mock
from unittest.mock import MagicMock, patch
from torch.testing._internal.common_dtype import floating_types
from torch.testing._internal.common_device_type import instantiate_device_type_tests, dtypes
from torch.testing._internal.common_quantization import skipIfNoFBGEMM
from torch.testing._internal.common_utils import TestCase, run_tests
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_model_dump.py
|
check_memory
|
def check_memory(model, expected):
self.open_html_model(wd, model)
memory_table = self.open_section_and_get_body(wd, "Tensor Memory")
device = memory_table.find_element_by_xpath("//table/tbody/tr[1]/td[1]").text
self.assertEqual("cpu", device)
memory_usage_str = memory_table.find_element_by_xpath("//table/tbody/tr[1]/td[2]").text
self.assertEqual(expected, int(memory_usage_str))
simple_model_memory = (
# First layer, including bias.
64 * (16 + 1) +
# Second layer, including bias.
8 * (64 + 1)
# 32-bit float
) * 4
check_memory(torch.jit.script(SimpleModel()), simple_model_memory)
# The same SimpleModel instance appears twice in this model.
# The tensors will be shared, so ensure no double-counting.
a_simple_model = SimpleModel()
check_memory(
torch.jit.script(
torch.nn.Sequential(a_simple_model, a_simple_model)),
simple_model_memory)
# The freezing process will move the weight and bias
# from data to constants. Ensure they are still counted.
check_memory(
torch.jit.freeze(torch.jit.script(SimpleModel()).eval()),
simple_model_memory)
# Make sure we can handle a model with both constants and data tensors.
class ComposedModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.w1 = torch.zeros(1, 2)
self.w2 = torch.ones(2, 2)
def forward(self, arg):
return arg * self.w2 + self.w1
check_memory(
torch.jit.freeze(
torch.jit.script(ComposedModule()).eval(),
preserved_attrs=["w1"]),
4 * (2 + 4))
|
def check_memory(model, expected):
self.open_html_model(wd, model)
memory_table = self.open_section_and_get_body(wd, "Tensor Memory")
device = memory_table.find_element_by_xpath("//table/tbody/tr[1]/td[1]").text
self.assertEqual("cpu", device)
memory_usage_str = memory_table.find_element_by_xpath("//table/tbody/tr[1]/td[2]").text
self.assertEqual(expected, int(memory_usage_str))
simple_model_memory = (
# First layer, including bias.
64 * (16 + 1) +
# Second layer, including bias.
8 * (64 + 1)
# 32-bit float
) * 4
check_memory(torch.jit.script(SimpleModel()), simple_model_memory)
# The same SimpleModel instance appears twice in this model.
# The tensors will be shared, so ensure no double-counting.
a_simple_model = SimpleModel()
check_memory(
torch.jit.script(
torch.nn.Sequential(a_simple_model, a_simple_model)),
simple_model_memory)
# The freezing process will move the weight and bias
# from data to constants. Ensure they are still counted.
check_memory(
torch.jit.freeze(torch.jit.script(SimpleModel()).eval()),
simple_model_memory)
# Make sure we can handle a model with both constants and data tensors.
class ComposedModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.w1 = torch.zeros(1, 2)
self.w2 = torch.ones(2, 2)
def forward(self, arg):
return arg * self.w2 + self.w1
check_memory(
torch.jit.freeze(
torch.jit.script(ComposedModule()).eval(),
preserved_attrs=["w1"]),
4 * (2 + 4))
|
import os
import io
import functools
import tempfile
import urllib
import unittest
import torch
import torch.backends.xnnpack
import torch.utils.model_dump
import torch.utils.mobile_optimizer
from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS, skipIfNoXNNPACK
from torch.testing._internal.common_quantized import supported_qengines
from selenium import webdriver
|
import os
import io
import functools
import tempfile
import urllib
import unittest
import torch
import torch.backends.xnnpack
import torch.utils.model_dump
import torch.utils.mobile_optimizer
from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS, skipIfNoXNNPACK
from torch.testing._internal.common_quantized import supported_qengines
from selenium import webdriver
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_model_dump.py
|
__init__
|
def __init__(self):
super().__init__()
self.layer1 = torch.nn.Linear(16, 64)
self.relu1 = torch.nn.ReLU()
self.layer2 = torch.nn.Linear(64, 8)
self.relu2 = torch.nn.ReLU()
|
def __init__(self) -> None:
super().__init__()
self.layer1 = torch.nn.Linear(16, 64)
self.relu1 = torch.nn.ReLU()
self.layer2 = torch.nn.Linear(64, 8)
self.relu2 = torch.nn.ReLU()
|
import os
import io
import functools
import tempfile
import urllib
import unittest
import torch
import torch.backends.xnnpack
import torch.utils.model_dump
import torch.utils.mobile_optimizer
from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS, skipIfNoXNNPACK
from torch.testing._internal.common_quantized import supported_qengines
class SimpleModel(torch.nn.Module):
from selenium import webdriver
|
import os
import io
import functools
import tempfile
import urllib
import unittest
import torch
import torch.backends.xnnpack
import torch.utils.model_dump
import torch.utils.mobile_optimizer
from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS, skipIfNoXNNPACK
from torch.testing._internal.common_quantized import supported_qengines
class SimpleModel(torch.nn.Module):
from selenium import webdriver
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_model_dump.py
|
__init__
|
def __init__(self):
super().__init__()
self.layer1 = torch.nn.Linear(16, 64)
self.relu1 = torch.nn.ReLU()
self.layer2 = torch.nn.Linear(64, 8)
self.relu2 = torch.nn.ReLU()
|
def __init__(self) -> None:
super().__init__()
self.layer1 = torch.nn.Linear(16, 64)
self.relu1 = torch.nn.ReLU()
self.layer2 = torch.nn.Linear(64, 8)
self.relu2 = torch.nn.ReLU()
|
import os
import io
import functools
import tempfile
import urllib
import unittest
import torch
import torch.backends.xnnpack
import torch.utils.model_dump
import torch.utils.mobile_optimizer
from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS, skipIfNoXNNPACK
from torch.testing._internal.common_quantized import supported_qengines
class SimpleModel(torch.nn.Module):
from selenium import webdriver
|
import os
import io
import functools
import tempfile
import urllib
import unittest
import torch
import torch.backends.xnnpack
import torch.utils.model_dump
import torch.utils.mobile_optimizer
from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS, skipIfNoXNNPACK
from torch.testing._internal.common_quantized import supported_qengines
class SimpleModel(torch.nn.Module):
from selenium import webdriver
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_model_dump.py
|
webdriver_test
|
def webdriver_test(testfunc):
@functools.wraps(testfunc)
def wrapper(self, *args, **kwds):
self.needs_resources()
if os.environ.get("RUN_WEBDRIVER") != "1":
self.skipTest("Webdriver not requested")
from selenium import webdriver
for driver in [
"Firefox",
"Chrome",
]:
with self.subTest(driver=driver):
wd = getattr(webdriver, driver)()
testfunc(self, wd, *args, **kwds)
wd.close()
return wrapper
class TestModelDump(TestCase):
def needs_resources(self):
pass
def test_inline_skeleton(self):
self.needs_resources()
skel = torch.utils.model_dump.get_inline_skeleton()
assert "unpkg.org" not in skel
assert "src=" not in skel
def do_dump_model(self, model, extra_files=None):
# Just check that we're able to run successfully.
buf = io.BytesIO()
torch.jit.save(model, buf, _extra_files=extra_files)
info = torch.utils.model_dump.get_model_info(buf)
assert info is not None
def open_html_model(self, wd, model, extra_files=None):
buf = io.BytesIO()
torch.jit.save(model, buf, _extra_files=extra_files)
page = torch.utils.model_dump.get_info_and_burn_skeleton(buf)
wd.get("data:text/html;charset=utf-8," + urllib.parse.quote(page))
def open_section_and_get_body(self, wd, name):
container = wd.find_element_by_xpath(f"//div[@data-hider-title='{name}']")
caret = container.find_element_by_class_name("caret")
if container.get_attribute("data-shown") != "true":
caret.click()
content = container.find_element_by_tag_name("div")
return content
def test_scripted_model(self):
model = torch.jit.script(SimpleModel())
self.do_dump_model(model)
def test_traced_model(self):
model = torch.jit.trace(SimpleModel(), torch.zeros(2, 16))
self.do_dump_model(model)
def test_main(self):
self.needs_resources()
if IS_WINDOWS:
# I was getting tempfile errors in CI. Just skip it.
self.skipTest("Disabled on Windows.")
with tempfile.NamedTemporaryFile() as tf:
torch.jit.save(torch.jit.script(SimpleModel()), tf)
# Actually write contents to disk so we can read it below
tf.flush()
stdout = io.StringIO()
torch.utils.model_dump.main(
[
None,
"--style=json",
tf.name,
],
stdout=stdout)
self.assertRegex(stdout.getvalue(), r'\A{.*SimpleModel')
stdout = io.StringIO()
torch.utils.model_dump.main(
[
None,
"--style=html",
tf.name,
],
stdout=stdout)
self.assertRegex(
stdout.getvalue().replace("\n", " "),
r'\A<!DOCTYPE.*SimpleModel.*componentDidMount')
def get_quant_model(self):
fmodel = QuantModel().eval()
fmodel = torch.ao.quantization.fuse_modules(fmodel, [
["core.layer1", "core.relu1"],
["core.layer2", "core.relu2"],
])
fmodel.qconfig = torch.ao.quantization.get_default_qconfig("qnnpack")
prepped = torch.ao.quantization.prepare(fmodel)
prepped(torch.randn(2, 16))
qmodel = torch.ao.quantization.convert(prepped)
return qmodel
@unittest.skipUnless("qnnpack" in supported_qengines, "QNNPACK not available")
def test_quantized_model(self):
qmodel = self.get_quant_model()
self.do_dump_model(torch.jit.script(qmodel))
@skipIfNoXNNPACK
@unittest.skipUnless("qnnpack" in supported_qengines, "QNNPACK not available")
def test_optimized_quantized_model(self):
qmodel = self.get_quant_model()
smodel = torch.jit.trace(qmodel, torch.zeros(2, 16))
omodel = torch.utils.mobile_optimizer.optimize_for_mobile(smodel)
self.do_dump_model(omodel)
def test_model_with_lists(self):
model = torch.jit.script(ModelWithLists())
self.do_dump_model(model)
def test_invalid_json(self):
model = torch.jit.script(SimpleModel())
self.do_dump_model(model, extra_files={"foo.json": "{"})
@webdriver_test
def test_memory_computation(self, wd):
def check_memory(model, expected):
self.open_html_model(wd, model)
memory_table = self.open_section_and_get_body(wd, "Tensor Memory")
device = memory_table.find_element_by_xpath("//table/tbody/tr[1]/td[1]").text
self.assertEqual("cpu", device)
memory_usage_str = memory_table.find_element_by_xpath("//table/tbody/tr[1]/td[2]").text
self.assertEqual(expected, int(memory_usage_str))
simple_model_memory = (
# First layer, including bias.
64 * (16 + 1) +
# Second layer, including bias.
8 * (64 + 1)
# 32-bit float
) * 4
check_memory(torch.jit.script(SimpleModel()), simple_model_memory)
# The same SimpleModel instance appears twice in this model.
# The tensors will be shared, so ensure no double-counting.
a_simple_model = SimpleModel()
check_memory(
torch.jit.script(
torch.nn.Sequential(a_simple_model, a_simple_model)),
simple_model_memory)
# The freezing process will move the weight and bias
# from data to constants. Ensure they are still counted.
check_memory(
torch.jit.freeze(torch.jit.script(SimpleModel()).eval()),
simple_model_memory)
# Make sure we can handle a model with both constants and data tensors.
class ComposedModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.w1 = torch.zeros(1, 2)
self.w2 = torch.ones(2, 2)
def forward(self, arg):
return arg * self.w2 + self.w1
check_memory(
torch.jit.freeze(
torch.jit.script(ComposedModule()).eval(),
preserved_attrs=["w1"]),
4 * (2 + 4))
if __name__ == '__main__':
run_tests()
|
def webdriver_test(testfunc):
@functools.wraps(testfunc)
def wrapper(self, *args, **kwds):
self.needs_resources()
if os.environ.get("RUN_WEBDRIVER") != "1":
self.skipTest("Webdriver not requested")
from selenium import webdriver
for driver in [
"Firefox",
"Chrome",
]:
with self.subTest(driver=driver):
wd = getattr(webdriver, driver)()
testfunc(self, wd, *args, **kwds)
wd.close()
return wrapper
class TestModelDump(TestCase):
def needs_resources(self):
pass
def test_inline_skeleton(self):
self.needs_resources()
skel = torch.utils.model_dump.get_inline_skeleton()
assert "unpkg.org" not in skel
assert "src=" not in skel
def do_dump_model(self, model, extra_files=None):
# Just check that we're able to run successfully.
buf = io.BytesIO()
torch.jit.save(model, buf, _extra_files=extra_files)
info = torch.utils.model_dump.get_model_info(buf)
assert info is not None
def open_html_model(self, wd, model, extra_files=None):
buf = io.BytesIO()
torch.jit.save(model, buf, _extra_files=extra_files)
page = torch.utils.model_dump.get_info_and_burn_skeleton(buf)
wd.get("data:text/html;charset=utf-8," + urllib.parse.quote(page))
def open_section_and_get_body(self, wd, name):
container = wd.find_element_by_xpath(f"//div[@data-hider-title='{name}']")
caret = container.find_element_by_class_name("caret")
if container.get_attribute("data-shown") != "true":
caret.click()
content = container.find_element_by_tag_name("div")
return content
def test_scripted_model(self):
model = torch.jit.script(SimpleModel())
self.do_dump_model(model)
def test_traced_model(self):
model = torch.jit.trace(SimpleModel(), torch.zeros(2, 16))
self.do_dump_model(model)
def test_main(self):
self.needs_resources()
if IS_WINDOWS:
# I was getting tempfile errors in CI. Just skip it.
self.skipTest("Disabled on Windows.")
with tempfile.NamedTemporaryFile() as tf:
torch.jit.save(torch.jit.script(SimpleModel()), tf)
# Actually write contents to disk so we can read it below
tf.flush()
stdout = io.StringIO()
torch.utils.model_dump.main(
[
None,
"--style=json",
tf.name,
],
stdout=stdout)
self.assertRegex(stdout.getvalue(), r'\A{.*SimpleModel')
stdout = io.StringIO()
torch.utils.model_dump.main(
[
None,
"--style=html",
tf.name,
],
stdout=stdout)
self.assertRegex(
stdout.getvalue().replace("\n", " "),
r'\A<!DOCTYPE.*SimpleModel.*componentDidMount')
def get_quant_model(self):
fmodel = QuantModel().eval()
fmodel = torch.ao.quantization.fuse_modules(fmodel, [
["core.layer1", "core.relu1"],
["core.layer2", "core.relu2"],
])
fmodel.qconfig = torch.ao.quantization.get_default_qconfig("qnnpack")
prepped = torch.ao.quantization.prepare(fmodel)
prepped(torch.randn(2, 16))
qmodel = torch.ao.quantization.convert(prepped)
return qmodel
@unittest.skipUnless("qnnpack" in supported_qengines, "QNNPACK not available")
def test_quantized_model(self):
qmodel = self.get_quant_model()
self.do_dump_model(torch.jit.script(qmodel))
@skipIfNoXNNPACK
@unittest.skipUnless("qnnpack" in supported_qengines, "QNNPACK not available")
def test_optimized_quantized_model(self):
qmodel = self.get_quant_model()
smodel = torch.jit.trace(qmodel, torch.zeros(2, 16))
omodel = torch.utils.mobile_optimizer.optimize_for_mobile(smodel)
self.do_dump_model(omodel)
def test_model_with_lists(self):
model = torch.jit.script(ModelWithLists())
self.do_dump_model(model)
def test_invalid_json(self):
model = torch.jit.script(SimpleModel())
self.do_dump_model(model, extra_files={"foo.json": "{"})
@webdriver_test
def test_memory_computation(self, wd):
def check_memory(model, expected):
self.open_html_model(wd, model)
memory_table = self.open_section_and_get_body(wd, "Tensor Memory")
device = memory_table.find_element_by_xpath("//table/tbody/tr[1]/td[1]").text
self.assertEqual("cpu", device)
memory_usage_str = memory_table.find_element_by_xpath("//table/tbody/tr[1]/td[2]").text
self.assertEqual(expected, int(memory_usage_str))
simple_model_memory = (
# First layer, including bias.
64 * (16 + 1) +
# Second layer, including bias.
8 * (64 + 1)
# 32-bit float
) * 4
check_memory(torch.jit.script(SimpleModel()), simple_model_memory)
# The same SimpleModel instance appears twice in this model.
# The tensors will be shared, so ensure no double-counting.
a_simple_model = SimpleModel()
check_memory(
torch.jit.script(
torch.nn.Sequential(a_simple_model, a_simple_model)),
simple_model_memory)
# The freezing process will move the weight and bias
# from data to constants. Ensure they are still counted.
check_memory(
torch.jit.freeze(torch.jit.script(SimpleModel()).eval()),
simple_model_memory)
# Make sure we can handle a model with both constants and data tensors.
class ComposedModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.w1 = torch.zeros(1, 2)
self.w2 = torch.ones(2, 2)
def forward(self, arg):
return arg * self.w2 + self.w1
check_memory(
torch.jit.freeze(
torch.jit.script(ComposedModule()).eval(),
preserved_attrs=["w1"]),
4 * (2 + 4))
if __name__ == '__main__':
run_tests()
|
import os
import io
import functools
import tempfile
import urllib
import unittest
import torch
import torch.backends.xnnpack
import torch.utils.model_dump
import torch.utils.mobile_optimizer
from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS, skipIfNoXNNPACK
from torch.testing._internal.common_quantized import supported_qengines
from selenium import webdriver
|
import os
import io
import functools
import tempfile
import urllib
import unittest
import torch
import torch.backends.xnnpack
import torch.utils.model_dump
import torch.utils.mobile_optimizer
from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS, skipIfNoXNNPACK
from torch.testing._internal.common_quantized import supported_qengines
from selenium import webdriver
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_module_init.py
|
generate_tests
|
def generate_tests(test_cls, constructor_arg_db):
# test all modules underneath these namespaces...
NAMESPACES = [
torch.nn,
torch.ao.nn.qat,
torch.ao.nn.quantized,
torch.ao.nn.qat,
torch.ao.nn.quantizable,
torch.ao.nn.quantized,
]
# ...except these
MODULES_TO_SKIP = {
torch.nn.Module,
torch.nn.Container, # deprecated
torch.nn.NLLLoss2d, # deprecated
# TODO: Remove these 4 from this list once the ASan issue is fixed.
# See https://github.com/pytorch/pytorch/issues/55396
torch.ao.nn.quantized.Embedding,
torch.ao.nn.quantized.EmbeddingBag,
torch.ao.nn.quantized.Embedding,
torch.ao.nn.quantized.EmbeddingBag,
torch.ao.nn.quantized.LSTM,
torch.ao.nn.quantized.MultiheadAttention,
}
# no need to support kwargs for these modules even though
# they have parameters / buffers because they are passed in
# already instantiated s
MODULES_WITHOUT_KWARGS_SUPPORT = {
torch.nn.BCELoss,
torch.nn.BCEWithLogitsLoss,
torch.nn.CrossEntropyLoss,
torch.nn.FractionalMaxPool2d,
torch.nn.FractionalMaxPool3d,
torch.nn.MultiLabelSoftMarginLoss,
torch.nn.MultiMarginLoss,
torch.nn.NLLLoss,
torch.nn.TransformerDecoder,
torch.nn.TransformerEncoder,
}
# modules that supported kwargs before
MODULES_WITH_PREVIOUS_KWARGS = {
torch.nn.Identity,
}
# lazy modules don't instantiate parameters right away
LAZY_MODULES = {
torch.nn.LazyBatchNorm1d,
torch.nn.LazyBatchNorm2d,
torch.nn.LazyBatchNorm3d,
torch.nn.LazyConv1d,
torch.nn.LazyConv2d,
torch.nn.LazyConv3d,
torch.nn.LazyConvTranspose1d,
torch.nn.LazyConvTranspose2d,
torch.nn.LazyConvTranspose3d,
torch.nn.LazyConvTranspose3d,
torch.nn.LazyInstanceNorm1d,
torch.nn.LazyInstanceNorm2d,
torch.nn.LazyInstanceNorm3d,
torch.nn.LazyLinear,
}
# these modules requires FBGEMM backend to instantiate
MODULES_THAT_REQUIRE_FBGEMM = {
torch.ao.nn.quantized.Conv1d,
torch.ao.nn.quantized.Conv2d,
torch.ao.nn.quantized.Conv3d,
torch.ao.nn.quantized.ConvTranspose1d,
torch.ao.nn.quantized.ConvTranspose2d,
torch.ao.nn.quantized.ConvTranspose3d,
torch.ao.nn.quantized.Linear,
# Remove the lines below after AO migration is complete
torch.ao.nn.quantized.Conv1d,
torch.ao.nn.quantized.Conv2d,
torch.ao.nn.quantized.Conv3d,
torch.ao.nn.quantized.ConvTranspose1d,
torch.ao.nn.quantized.ConvTranspose2d,
torch.ao.nn.quantized.ConvTranspose3d,
torch.ao.nn.quantized.Linear,
}
for namespace in NAMESPACES:
# the "nn" in "torch.nn"
namespace_basename = namespace.__name__.split('.')[-1]
for module_name in namespace.modules.__all__:
# class object for this module (e.g. torch.nn.Linear)
module_cls = getattr(namespace.modules, module_name)
if module_cls in MODULES_TO_SKIP:
continue
verify_kwargs = module_cls not in MODULES_WITHOUT_KWARGS_SUPPORT
module_is_lazy = module_cls in LAZY_MODULES
check_nonexistent_arg = module_cls not in MODULES_WITH_PREVIOUS_KWARGS
# Generate a function for testing this module and setattr it onto the test class.
run_test = generate_test_func(test_cls, module_cls, constructor_arg_db,
verify_kwargs=verify_kwargs,
module_is_lazy=module_is_lazy,
check_nonexistent_arg=check_nonexistent_arg)
test_name = f'test_{namespace_basename}_{module_name}'
if module_cls in MODULES_THAT_REQUIRE_FBGEMM:
run_test = skipIfNoFBGEMM(run_test)
setattr(TestModuleInit, test_name, run_test)
class TestModuleInit(TestCase):
_ignore_not_implemented_error = False
generate_tests(TestModuleInit, build_constructor_arg_db())
instantiate_device_type_tests(TestModuleInit, globals())
if __name__ == '__main__':
run_tests()
|
import inspect
import torch
from unittest import mock
from unittest.mock import MagicMock, patch
from torch.testing._internal.common_dtype import floating_types
from torch.testing._internal.common_device_type import instantiate_device_type_tests, dtypes
from torch.testing._internal.common_quantization import skipIfNoFBGEMM
from torch.testing._internal.common_utils import TestCase, run_tests
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_module_tracker.py
|
test_module_hierarchy
|
def test_module_hierarchy(self):
seen_fw = []
seen_bw = []
class Foo(nn.Module):
def forward(self, x):
x = x["a"].relu_()
seen_fw.append((copy(tracker.parents), tracker.is_bw))
x.register_hook(
lambda grad: seen_bw.append((copy(tracker.parents), tracker.is_bw))
)
return {"a": torch.mm(x, x)}
class Mod(nn.Module):
def __init__(self) -> None:
super().__init__()
self.a = Foo()
self.b = nn.ModuleDict({"nest": Foo()})
self.c = nn.ModuleList([Foo()])
def forward(self, x):
x = self.c[0](x)
return self.b["nest"](self.a(x))
mod = Mod()
with ModuleTracker() as tracker:
mod({"a": torch.randn(10, 10, requires_grad=True).clone()})[
"a"
].sum().backward()
mod({"a": torch.randn(10, 10, requires_grad=True).clone()})[
"a"
].sum().backward()
self.assertEqual(
seen_fw,
[
({"Global", "Mod", "Mod.c.0"}, False),
({"Global", "Mod", "Mod.a"}, False),
({"Global", "Mod", "Mod.b.nest"}, False),
({"Global", "Mod", "Mod.c.0"}, False),
({"Global", "Mod", "Mod.a"}, False),
({"Global", "Mod", "Mod.b.nest"}, False),
],
)
self.assertEqual(
seen_bw,
[
({"Global", "Mod", "Mod.b.nest"}, True),
({"Global", "Mod", "Mod.a"}, True),
({"Global", "Mod", "Mod.c.0"}, True),
({"Global", "Mod", "Mod.b.nest"}, True),
({"Global", "Mod", "Mod.a"}, True),
({"Global", "Mod", "Mod.c.0"}, True),
],
)
|
from copy import copy
import torch
from torch import nn
from torch.testing._internal.common_utils import run_tests, TestCase, xfailIfTorchDynamo
from torch.utils.checkpoint import checkpoint
from torch.utils.module_tracker import ModuleTracker
class TestModuleTracker(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_module_tracker.py
|
forward
|
def forward(self, x):
x = x["a"].relu_()
seen_fw.append((copy(tracker.parents), tracker.is_bw))
x.register_hook(
lambda grad: seen_bw.append((copy(tracker.parents), tracker.is_bw))
)
return {"a": torch.mm(x, x)}
|
from copy import copy
import torch
from torch import nn
from torch.testing._internal.common_utils import run_tests, TestCase, xfailIfTorchDynamo
from torch.utils.checkpoint import checkpoint
from torch.utils.module_tracker import ModuleTracker
class Foo(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_module_tracker.py
|
forward
|
def forward(self, x):
x = x["a"].relu_()
seen_fw.append((copy(tracker.parents), tracker.is_bw))
x.register_hook(
lambda grad: seen_bw.append((copy(tracker.parents), tracker.is_bw))
)
return {"a": torch.mm(x, x)}
|
from copy import copy
import torch
from torch import nn
from torch.testing._internal.common_utils import run_tests, TestCase, xfailIfTorchDynamo
from torch.utils.checkpoint import checkpoint
from torch.utils.module_tracker import ModuleTracker
class Foo(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_module_tracker.py
|
test_confused_hierarchy
|
def test_confused_hierarchy(self):
class MyMod(nn.Module):
def __init__(self):
super().__init__()
self.inner = nn.Linear(2, 2)
self.ran = False
def forward(self, inp):
if not self.ran:
self.ran = True
return self(inp)
else:
self.ran = False
return self.inner(inp)
mod = MyMod()
inp = torch.rand(1, 2, requires_grad=True)
# Should not fail
with ModuleTracker() as tracker:
res = mod(inp)
res.sum().backward()
# Should not fail
with ModuleTracker() as tracker:
res = checkpoint(lambda inp: mod(inp), inp)
res.sum().backward()
|
from copy import copy
import torch
from torch import nn
from torch.testing._internal.common_utils import run_tests, TestCase, xfailIfTorchDynamo
from torch.utils.checkpoint import checkpoint
from torch.utils.module_tracker import ModuleTracker
class TestModuleTracker(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_module_tracker.py
|
__init__
|
def __init__(self) -> None:
super().__init__()
self.a = Foo()
self.b = nn.ModuleDict({"nest": Foo()})
self.c = nn.ModuleList([Foo()])
|
from copy import copy
import torch
from torch import nn
from torch.testing._internal.common_utils import run_tests, TestCase, xfailIfTorchDynamo
from torch.utils.checkpoint import checkpoint
from torch.utils.module_tracker import ModuleTracker
class Mod(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_module_tracker.py
|
forward
|
def forward(self, x):
x = x["a"].relu_()
seen_fw.append((copy(tracker.parents), tracker.is_bw))
x.register_hook(
lambda grad: seen_bw.append((copy(tracker.parents), tracker.is_bw))
)
return {"a": torch.mm(x, x)}
|
from copy import copy
import torch
from torch import nn
from torch.testing._internal.common_utils import run_tests, TestCase, xfailIfTorchDynamo
from torch.utils.checkpoint import checkpoint
from torch.utils.module_tracker import ModuleTracker
class Foo(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_module_tracker.py
|
test_bw_detection
|
def test_bw_detection(self):
mod = nn.Linear(2, 2)
with ModuleTracker() as tracker:
mod(torch.rand(2, requires_grad=True)).sum().backward()
self.assertFalse(tracker.is_bw)
self.assertEqual(tracker.parents, {"Global"})
|
from copy import copy
import torch
from torch import nn
from torch.testing._internal.common_utils import run_tests, TestCase, xfailIfTorchDynamo
from torch.utils.checkpoint import checkpoint
from torch.utils.module_tracker import ModuleTracker
class TestModuleTracker(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_modules.py
|
_traverse_obj
|
def _traverse_obj(self, obj, func):
if isinstance(obj, (tuple, list)):
return type(obj)(self._traverse_obj(o, func) for o in obj)
elif isgenerator(obj):
return tuple(self._traverse_obj(o, func) for o in obj)
elif isinstance(obj, dict):
return {name: self._traverse_obj(o, func) for name, o in obj.items()}
elif isinstance(obj, (torch.Tensor, torch.nn.Parameter)):
return func(obj)
|
def _traverse_obj(self, obj, func):
if isinstance(obj, (tuple, list)):
return type(obj)(self._traverse_obj(o, func) for o in obj)
elif isgenerator(obj):
return tuple(self._traverse_obj(o, func) for o in obj)
elif isinstance(obj, dict):
return {name: self._traverse_obj(o, func) for name, o in obj.items()}
elif isinstance(obj, (torch.Tensor, torch.nn.Parameter)):
return func(obj)
else:
return obj
|
from itertools import product
from inspect import signature, isgenerator
from copy import deepcopy
import tempfile
from operator import methodcaller
import torch
from torch.testing._internal.common_cuda import with_tf32_off
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, onlyCUDA, toleranceOverride, tol, skipMeta)
from torch.testing._internal.common_modules import module_db, modules, TrainEvalMode
from torch.testing._internal.common_utils import (
TestCase, run_tests, freeze_rng_state, mock_wrapper, get_tensors_from, gradcheck,
gradgradcheck, skipIfMps, skipIfTorchInductor)
from unittest.mock import patch, call
class TestModule(TestCase):
|
from itertools import chain, product
from inspect import signature, isgenerator
from copy import deepcopy
import tempfile
from operator import methodcaller
import torch
from torch._subclasses.meta_utils import assert_metadata_eq
from torch.testing._internal.common_cuda import with_tf32_off
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, onlyCPU, onlyCUDA, toleranceOverride, tol, skipMeta)
from torch.testing._internal.common_modules import module_db, modules, ModuleErrorEnum, TrainEvalMode
from torch.testing._internal.common_utils import (
TestCase, run_tests, freeze_rng_state, mock_wrapper, get_tensors_from, gradcheck,
gradgradcheck, parametrize, wrapSwapTensorsTest)
from unittest.mock import patch, call
class TestModule(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_modules.py
|
test_non_contiguous_tensors
|
def test_non_contiguous_tensors(self, device, dtype, module_info, training):
# Check modules work with non-contiguous tensors
module_cls = module_info.module_cls
module_inputs = module_info.module_inputs_func(module_info, device=device, dtype=dtype,
requires_grad=True, training=training)
def _make_non_contiguous(obj):
def inner_make_non_contiguous(obj):
# Scalar tensors can not be made non-contiguous
if not isinstance(obj, torch.Tensor) or obj.dim() == 0:
return obj
out = torch.repeat_interleave(obj, 2, dim=-1)
out = out[..., ::2].detach()
out.requires_grad = obj.requires_grad
return out
return self._traverse_obj(obj, inner_make_non_contiguous)
def _can_be_noncontiguous(obj):
if isinstance(obj, (tuple, list)):
return any(_can_be_noncontiguous(o) for o in obj)
elif isinstance(obj, dict):
return any(_can_be_noncontiguous(o) for o in obj.values())
# scalar tensors can not be non-contiguous
if not isinstance(obj, torch.Tensor) or obj.dim() == 0:
return False
return True
for module_input in module_inputs:
if module_input.forward_input is None:
continue
input_args, input_kwargs = module_input.forward_input.args, module_input.forward_input.kwargs
if not (_can_be_noncontiguous(input_args) or _can_be_noncontiguous(input_kwargs)):
continue
# === Instantiate the module. ===
args, kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
m = module_cls(*args, **kwargs)
m.to(device).to(dtype)
m.train(training)
self._retain_grad((input_args, input_kwargs))
# === Forward with default input
with freeze_rng_state():
default_output = m(*input_args, **input_kwargs)
if isinstance(default_output, torch.Tensor):
grad_output = default_output.clone().detach_().normal_()
default_output.backward(grad_output, retain_graph=True)
else:
grad_output = tuple(self._traverse_obj(o, lambda o: o.clone().detach_().normal_())
for o in default_output)
flattened_default_output, _ = torch.utils._pytree.tree_flatten(default_output)
flattened_grad_output, _ = torch.utils._pytree.tree_flatten(grad_output)
for o, g_o in zip(flattened_default_output, flattened_grad_output):
o.backward(g_o, retain_graph=True)
default_input_args_grad, default_input_kwargs_grad = deepcopy(self._get_grads((input_args, input_kwargs)))
default_param_grad = deepcopy([p.grad for p in m.parameters()])
# === Construct non-contiguous tensors ===
nc_input_args, nc_input_kwargs = _make_non_contiguous((input_args, input_kwargs))
nc_grad_output = _make_non_contiguous(grad_output)
# === Compare results with non-contiguous and contiguous tensors ===
inputs = [(input_args, input_kwargs), (nc_input_args, nc_input_kwargs)]
grads = [grad_output, nc_grad_output]
for (in_args, in_kwargs), g_out in product(inputs, grads):
g_out_copy = deepcopy(g_out)
self._zero_grad((in_args, in_kwargs))
self._zero_grad(m.parameters())
with freeze_rng_state():
out = m(*in_args, **in_kwargs)
if isinstance(out, torch.Tensor):
out.backward(g_out_copy, retain_graph=True)
else:
flattened_out, _ = torch.utils._pytree.tree_flatten(out)
flattened_g_out_copy, _ = torch.utils._pytree.tree_flatten(g_out_copy)
for o, g_o in zip(flattened_out, flattened_g_out_copy):
o.backward(g_o, retain_graph=True)
input_args_grad, input_kwargs_grad = self._get_grads((in_args, in_kwargs))
self.assertEqual(out, default_output)
self.assertEqual(input_args_grad, default_input_args_grad, atol=1e-4, rtol=0)
self.assertEqual(input_kwargs_grad, default_input_kwargs_grad, atol=1e-4, rtol=0)
param_grad = [p.grad for p in m.parameters()]
self.assertEqual(param_grad, default_param_grad)
|
def test_non_contiguous_tensors(self, device, dtype, module_info, training):
# Check modules work with non-contiguous tensors
module_cls = module_info.module_cls
module_inputs = module_info.module_inputs_func(module_info, device=device, dtype=dtype,
requires_grad=True, training=training)
def _make_non_contiguous(obj):
def inner_make_non_contiguous(obj):
# Scalar tensors can not be made non-contiguous
if not isinstance(obj, torch.Tensor) or obj.dim() == 0:
return obj
out = torch.repeat_interleave(obj, 2, dim=-1)
out = out[..., ::2].detach()
out.requires_grad = obj.requires_grad
return out
return self._traverse_obj(obj, inner_make_non_contiguous)
def _can_be_noncontiguous(obj):
if isinstance(obj, (tuple, list)):
return any(_can_be_noncontiguous(o) for o in obj)
elif isinstance(obj, dict):
return any(_can_be_noncontiguous(o) for o in obj.values())
# scalar tensors can not be non-contiguous
return isinstance(obj, torch.Tensor) and obj.dim() != 0
for module_input in module_inputs:
if module_input.forward_input is None:
continue
input_args, input_kwargs = module_input.forward_input.args, module_input.forward_input.kwargs
if not (_can_be_noncontiguous(input_args) or _can_be_noncontiguous(input_kwargs)):
continue
# === Instantiate the module. ===
args, kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
m = module_cls(*args, **kwargs)
m.to(device).to(dtype)
m.train(training)
self._retain_grad((input_args, input_kwargs))
# === Forward with default input
with freeze_rng_state():
default_output = m(*input_args, **input_kwargs)
if isinstance(default_output, torch.Tensor):
grad_output = default_output.clone().detach_().normal_()
default_output.backward(grad_output, retain_graph=True)
else:
grad_output = tuple(self._traverse_obj(o, lambda o: o.clone().detach_().normal_() if o.requires_grad else None)
for o in default_output)
flattened_default_output = torch.utils._pytree.tree_leaves(default_output)
flattened_grad_output = torch.utils._pytree.tree_leaves(grad_output)
for o, g_o in zip(flattened_default_output, flattened_grad_output):
if (o.requires_grad):
o.backward(g_o, retain_graph=True)
default_input_args_grad, default_input_kwargs_grad = deepcopy(self._get_grads((input_args, input_kwargs)))
default_param_grad = deepcopy([p.grad for p in m.parameters()])
# === Construct non-contiguous tensors ===
nc_input_args, nc_input_kwargs = _make_non_contiguous((input_args, input_kwargs))
nc_grad_output = _make_non_contiguous(grad_output)
# === Compare results with non-contiguous and contiguous tensors ===
inputs = [(input_args, input_kwargs), (nc_input_args, nc_input_kwargs)]
grads = [grad_output, nc_grad_output]
for (in_args, in_kwargs), g_out in product(inputs, grads):
g_out_copy = deepcopy(g_out)
self._zero_grad((in_args, in_kwargs))
self._zero_grad(m.parameters())
with freeze_rng_state():
out = m(*in_args, **in_kwargs)
if isinstance(out, torch.Tensor):
out.backward(g_out_copy, retain_graph=True)
else:
flattened_out = torch.utils._pytree.tree_leaves(out)
flattened_g_out_copy = torch.utils._pytree.tree_leaves(g_out_copy)
for o, g_o in zip(flattened_out, flattened_g_out_copy):
if o.requires_grad:
o.backward(g_o, retain_graph=True)
input_args_grad, input_kwargs_grad = self._get_grads((in_args, in_kwargs))
self.assertEqual(out, default_output)
self.assertEqual(input_args_grad, default_input_args_grad, atol=1e-4, rtol=0)
self.assertEqual(input_kwargs_grad, default_input_kwargs_grad, atol=1e-4, rtol=0)
param_grad = [p.grad for p in m.parameters()]
self.assertEqual(param_grad, default_param_grad)
|
from itertools import product
from inspect import signature, isgenerator
from copy import deepcopy
import tempfile
from operator import methodcaller
import torch
from torch.testing._internal.common_cuda import with_tf32_off
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, onlyCUDA, toleranceOverride, tol, skipMeta)
from torch.testing._internal.common_modules import module_db, modules, TrainEvalMode
from torch.testing._internal.common_utils import (
TestCase, run_tests, freeze_rng_state, mock_wrapper, get_tensors_from, gradcheck,
gradgradcheck, skipIfMps, skipIfTorchInductor)
from unittest.mock import patch, call
class TestModule(TestCase):
|
from itertools import chain, product
from inspect import signature, isgenerator
from copy import deepcopy
import tempfile
from operator import methodcaller
import torch
from torch._subclasses.meta_utils import assert_metadata_eq
from torch.testing._internal.common_cuda import with_tf32_off
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, onlyCPU, onlyCUDA, toleranceOverride, tol, skipMeta)
from torch.testing._internal.common_modules import module_db, modules, ModuleErrorEnum, TrainEvalMode
from torch.testing._internal.common_utils import (
TestCase, run_tests, freeze_rng_state, mock_wrapper, get_tensors_from, gradcheck,
gradgradcheck, parametrize, wrapSwapTensorsTest)
from unittest.mock import patch, call
class TestModule(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_modules.py
|
_can_be_noncontiguous
|
def _can_be_noncontiguous(obj):
if isinstance(obj, (tuple, list)):
return any(_can_be_noncontiguous(o) for o in obj)
elif isinstance(obj, dict):
return any(_can_be_noncontiguous(o) for o in obj.values())
# scalar tensors can not be non-contiguous
if not isinstance(obj, torch.Tensor) or obj.dim() == 0:
return False
return True
for module_input in module_inputs:
if module_input.forward_input is None:
continue
input_args, input_kwargs = module_input.forward_input.args, module_input.forward_input.kwargs
if not (_can_be_noncontiguous(input_args) or _can_be_noncontiguous(input_kwargs)):
continue
# === Instantiate the module. ===
args, kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
m = module_cls(*args, **kwargs)
m.to(device).to(dtype)
m.train(training)
self._retain_grad((input_args, input_kwargs))
# === Forward with default input
with freeze_rng_state():
default_output = m(*input_args, **input_kwargs)
if isinstance(default_output, torch.Tensor):
grad_output = default_output.clone().detach_().normal_()
default_output.backward(grad_output, retain_graph=True)
else:
grad_output = tuple(self._traverse_obj(o, lambda o: o.clone().detach_().normal_())
for o in default_output)
flattened_default_output, _ = torch.utils._pytree.tree_flatten(default_output)
flattened_grad_output, _ = torch.utils._pytree.tree_flatten(grad_output)
for o, g_o in zip(flattened_default_output, flattened_grad_output):
o.backward(g_o, retain_graph=True)
default_input_args_grad, default_input_kwargs_grad = deepcopy(self._get_grads((input_args, input_kwargs)))
default_param_grad = deepcopy([p.grad for p in m.parameters()])
# === Construct non-contiguous tensors ===
nc_input_args, nc_input_kwargs = _make_non_contiguous((input_args, input_kwargs))
nc_grad_output = _make_non_contiguous(grad_output)
# === Compare results with non-contiguous and contiguous tensors ===
inputs = [(input_args, input_kwargs), (nc_input_args, nc_input_kwargs)]
grads = [grad_output, nc_grad_output]
for (in_args, in_kwargs), g_out in product(inputs, grads):
g_out_copy = deepcopy(g_out)
self._zero_grad((in_args, in_kwargs))
self._zero_grad(m.parameters())
with freeze_rng_state():
out = m(*in_args, **in_kwargs)
if isinstance(out, torch.Tensor):
out.backward(g_out_copy, retain_graph=True)
else:
flattened_out, _ = torch.utils._pytree.tree_flatten(out)
flattened_g_out_copy, _ = torch.utils._pytree.tree_flatten(g_out_copy)
for o, g_o in zip(flattened_out, flattened_g_out_copy):
o.backward(g_o, retain_graph=True)
input_args_grad, input_kwargs_grad = self._get_grads((in_args, in_kwargs))
self.assertEqual(out, default_output)
self.assertEqual(input_args_grad, default_input_args_grad, atol=1e-4, rtol=0)
self.assertEqual(input_kwargs_grad, default_input_kwargs_grad, atol=1e-4, rtol=0)
param_grad = [p.grad for p in m.parameters()]
self.assertEqual(param_grad, default_param_grad)
|
def _can_be_noncontiguous(obj):
if isinstance(obj, (tuple, list)):
return any(_can_be_noncontiguous(o) for o in obj)
elif isinstance(obj, dict):
return any(_can_be_noncontiguous(o) for o in obj.values())
# scalar tensors can not be non-contiguous
return isinstance(obj, torch.Tensor) and obj.dim() != 0
for module_input in module_inputs:
if module_input.forward_input is None:
continue
input_args, input_kwargs = module_input.forward_input.args, module_input.forward_input.kwargs
if not (_can_be_noncontiguous(input_args) or _can_be_noncontiguous(input_kwargs)):
continue
# === Instantiate the module. ===
args, kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
m = module_cls(*args, **kwargs)
m.to(device).to(dtype)
m.train(training)
self._retain_grad((input_args, input_kwargs))
# === Forward with default input
with freeze_rng_state():
default_output = m(*input_args, **input_kwargs)
if isinstance(default_output, torch.Tensor):
grad_output = default_output.clone().detach_().normal_()
default_output.backward(grad_output, retain_graph=True)
else:
grad_output = tuple(self._traverse_obj(o, lambda o: o.clone().detach_().normal_() if o.requires_grad else None)
for o in default_output)
flattened_default_output = torch.utils._pytree.tree_leaves(default_output)
flattened_grad_output = torch.utils._pytree.tree_leaves(grad_output)
for o, g_o in zip(flattened_default_output, flattened_grad_output):
if (o.requires_grad):
o.backward(g_o, retain_graph=True)
default_input_args_grad, default_input_kwargs_grad = deepcopy(self._get_grads((input_args, input_kwargs)))
default_param_grad = deepcopy([p.grad for p in m.parameters()])
# === Construct non-contiguous tensors ===
nc_input_args, nc_input_kwargs = _make_non_contiguous((input_args, input_kwargs))
nc_grad_output = _make_non_contiguous(grad_output)
# === Compare results with non-contiguous and contiguous tensors ===
inputs = [(input_args, input_kwargs), (nc_input_args, nc_input_kwargs)]
grads = [grad_output, nc_grad_output]
for (in_args, in_kwargs), g_out in product(inputs, grads):
g_out_copy = deepcopy(g_out)
self._zero_grad((in_args, in_kwargs))
self._zero_grad(m.parameters())
with freeze_rng_state():
out = m(*in_args, **in_kwargs)
if isinstance(out, torch.Tensor):
out.backward(g_out_copy, retain_graph=True)
else:
flattened_out = torch.utils._pytree.tree_leaves(out)
flattened_g_out_copy = torch.utils._pytree.tree_leaves(g_out_copy)
for o, g_o in zip(flattened_out, flattened_g_out_copy):
if o.requires_grad:
o.backward(g_o, retain_graph=True)
input_args_grad, input_kwargs_grad = self._get_grads((in_args, in_kwargs))
self.assertEqual(out, default_output)
self.assertEqual(input_args_grad, default_input_args_grad, atol=1e-4, rtol=0)
self.assertEqual(input_kwargs_grad, default_input_kwargs_grad, atol=1e-4, rtol=0)
param_grad = [p.grad for p in m.parameters()]
self.assertEqual(param_grad, default_param_grad)
|
from itertools import product
from inspect import signature, isgenerator
from copy import deepcopy
import tempfile
from operator import methodcaller
import torch
from torch.testing._internal.common_cuda import with_tf32_off
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, onlyCUDA, toleranceOverride, tol, skipMeta)
from torch.testing._internal.common_modules import module_db, modules, TrainEvalMode
from torch.testing._internal.common_utils import (
TestCase, run_tests, freeze_rng_state, mock_wrapper, get_tensors_from, gradcheck,
gradgradcheck, skipIfMps, skipIfTorchInductor)
from unittest.mock import patch, call
|
from itertools import chain, product
from inspect import signature, isgenerator
from copy import deepcopy
import tempfile
from operator import methodcaller
import torch
from torch._subclasses.meta_utils import assert_metadata_eq
from torch.testing._internal.common_cuda import with_tf32_off
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, onlyCPU, onlyCUDA, toleranceOverride, tol, skipMeta)
from torch.testing._internal.common_modules import module_db, modules, ModuleErrorEnum, TrainEvalMode
from torch.testing._internal.common_utils import (
TestCase, run_tests, freeze_rng_state, mock_wrapper, get_tensors_from, gradcheck,
gradgradcheck, parametrize, wrapSwapTensorsTest)
from unittest.mock import patch, call
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_modules.py
|
fn_to_gradcheck
|
def fn_to_gradcheck(*flat_input_and_params):
input_and_params = torch.utils._pytree.tree_unflatten(flat_input_and_params, flat_spec)
new_input_args = input_and_params[:len(input_args)]
kwarg_args = input_and_params[-len(kwarg_tensors):]
new_kwargs = {name: obj for (name, _), obj in zip(kwarg_tensors, kwarg_args)}
with freeze_rng_state():
output = m(*new_input_args, **new_kwargs, **other_kwargs)
output_flattened, _ = torch.utils._pytree.tree_flatten(output)
return output_flattened
self.assertTrue(check(fn_to_gradcheck, flat_input, nondet_tol=gradcheck_nondet_tol))
|
def fn_to_gradcheck(*flat_input_and_params):
input_and_params = torch.utils._pytree.tree_unflatten(flat_input_and_params, flat_spec)
new_input_args = input_and_params[:len(input_args)]
kwarg_args = input_and_params[-len(kwarg_tensors):]
new_kwargs = {name: obj for (name, _), obj in zip(kwarg_tensors, kwarg_args)}
with freeze_rng_state():
output = m(*new_input_args, **new_kwargs, **other_kwargs)
output_flattened = torch.utils._pytree.tree_leaves(output)
return output_flattened
# check total derivative
grad_input = input_args + params + tuple(obj for (_, obj) in kwarg_tensors)
flat_input, flat_spec = torch.utils._pytree.tree_flatten(grad_input)
self.assertTrue(check(fn_to_gradcheck, flat_input, nondet_tol=gradcheck_nondet_tol))
# check partial derivatives
old_params_requires_grad = [p.requires_grad for p in params]
for p in params:
p.requires_grad = False
old_kwargs_requires_grad = [obj.requires_grad for (_, obj) in kwarg_tensors]
for (_, obj) in kwarg_tensors:
obj.requires_grad = False
for p, old in zip(params, old_params_requires_grad):
p.requires_grad = old
grad_input = input_args + params + tuple(obj for (_, obj) in kwarg_tensors)
flat_input, flat_spec = torch.utils._pytree.tree_flatten(grad_input)
self.assertTrue(check(fn_to_gradcheck, flat_input, nondet_tol=gradcheck_nondet_tol))
p.requires_grad = False
for (_, obj), old in zip(kwarg_tensors, old_kwargs_requires_grad):
obj.requires_grad = old
grad_input = input_args + params + tuple(obj for (_, obj) in kwarg_tensors)
flat_input, flat_spec = torch.utils._pytree.tree_flatten(grad_input)
self.assertTrue(check(fn_to_gradcheck, flat_input, nondet_tol=gradcheck_nondet_tol))
obj.requires_grad = False
|
from itertools import product
from inspect import signature, isgenerator
from copy import deepcopy
import tempfile
from operator import methodcaller
import torch
from torch.testing._internal.common_cuda import with_tf32_off
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, onlyCUDA, toleranceOverride, tol, skipMeta)
from torch.testing._internal.common_modules import module_db, modules, TrainEvalMode
from torch.testing._internal.common_utils import (
TestCase, run_tests, freeze_rng_state, mock_wrapper, get_tensors_from, gradcheck,
gradgradcheck, skipIfMps, skipIfTorchInductor)
from unittest.mock import patch, call
|
from itertools import chain, product
from inspect import signature, isgenerator
from copy import deepcopy
import tempfile
from operator import methodcaller
import torch
from torch._subclasses.meta_utils import assert_metadata_eq
from torch.testing._internal.common_cuda import with_tf32_off
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, onlyCPU, onlyCUDA, toleranceOverride, tol, skipMeta)
from torch.testing._internal.common_modules import module_db, modules, ModuleErrorEnum, TrainEvalMode
from torch.testing._internal.common_utils import (
TestCase, run_tests, freeze_rng_state, mock_wrapper, get_tensors_from, gradcheck,
gradgradcheck, parametrize, wrapSwapTensorsTest)
from unittest.mock import patch, call
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_modules.py
|
_to_device
|
def _to_device(obj):
if isinstance(obj, torch.Tensor):
res = obj.detach().to(device=device)
res.requires_grad = obj.requires_grad
return res
elif isinstance(obj, tuple):
return tuple(_to_device(o) for o in obj)
elif isinstance(obj, dict):
return {key: _to_device(o) for key, o in obj.items()}
else:
return deepcopy(obj)
for module_input in module_inputs_cpu:
# === Move input from cpu to device ===
cpu_forward_args = module_input.forward_input.args
cpu_forward_kwargs = module_input.forward_input.kwargs
gpu_forward_args, gpu_forward_kwargs = _to_device((cpu_forward_args, cpu_forward_kwargs))
self._retain_grad((cpu_forward_args, cpu_forward_kwargs, gpu_forward_args, gpu_forward_kwargs))
# === Construct module on cpu and gpu ===
args, kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
cpu_module = module_cls(*args, **kwargs).to(dtype).to("cpu")
cpu_module.train(training)
gpu_module = module_cls(*args, **kwargs).to(dtype).to(device)
gpu_module.train(training)
# === Lazy modules need to see an input to initialize params ===
if issubclass(module_cls, torch.nn.modules.lazy.LazyModuleMixin):
with torch.no_grad():
cpu_module(*cpu_forward_args, **cpu_forward_kwargs)
gpu_module(*gpu_forward_args, **gpu_forward_kwargs)
for cpu_p, gpu_p in zip(cpu_module.parameters(), gpu_module.parameters()):
gpu_p.data.copy_(cpu_p)
# === Compare forward output between cpu and gpu ===
cpu_outputs = cpu_module(*cpu_forward_args, **cpu_forward_kwargs)
gpu_outputs = gpu_module(*gpu_forward_args, **gpu_forward_kwargs)
self.assertEqual(cpu_outputs, gpu_outputs)
# === Run backwards on CPU and GPU and compare results ===
def check_backward(cpu_output, gpu_output):
cpu_grad_output = cpu_output.clone().normal_()
gpu_grad_output = cpu_grad_output.type_as(gpu_output)
cpu_output.backward(cpu_grad_output, retain_graph=True)
gpu_output.backward(gpu_grad_output, retain_graph=True)
cpu_grad_input = self._get_grads(cpu_forward_args)
gpu_grad_input = self._get_grads(gpu_forward_args)
self.assertEqual(cpu_grad_input, gpu_grad_input)
for cpu_p, gpu_p in zip(cpu_module.parameters(), gpu_module.parameters()):
self.assertEqual(cpu_p.grad, gpu_p.grad)
cpu_grad_kwarg_input = self._get_grads(cpu_forward_kwargs)
gpu_grad_kwarg_input = self._get_grads(gpu_forward_kwargs)
self.assertEqual(cpu_grad_kwarg_input, gpu_grad_kwarg_input)
for _ in range(5):
if isinstance(cpu_outputs, torch.Tensor):
check_backward(cpu_outputs, gpu_outputs)
else:
flatten_cpu_outputs, _ = torch.utils._pytree.tree_flatten(cpu_outputs)
flatten_gpu_outputs, _ = torch.utils._pytree.tree_flatten(gpu_outputs)
for cpu_output, gpu_output in zip(flatten_cpu_outputs, flatten_gpu_outputs):
check_backward(cpu_output, gpu_output)
|
def _to_device(obj):
if isinstance(obj, torch.Tensor):
res = obj.detach().to(device=device)
res.requires_grad = obj.requires_grad
return res
elif isinstance(obj, tuple):
return tuple(_to_device(o) for o in obj)
elif isinstance(obj, dict):
return {key: _to_device(o) for key, o in obj.items()}
else:
return deepcopy(obj)
for module_input in module_inputs_cpu:
# === Move input from cpu to device ===
cpu_forward_args = module_input.forward_input.args
cpu_forward_kwargs = module_input.forward_input.kwargs
gpu_forward_args, gpu_forward_kwargs = _to_device((cpu_forward_args, cpu_forward_kwargs))
self._retain_grad((cpu_forward_args, cpu_forward_kwargs, gpu_forward_args, gpu_forward_kwargs))
# === Construct module on cpu and gpu ===
args, kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
cpu_module = module_cls(*args, **kwargs).to(dtype).to("cpu")
cpu_module.train(training)
gpu_module = module_cls(*args, **kwargs).to(dtype).to(device)
gpu_module.train(training)
# === Lazy modules need to see an input to initialize params ===
if issubclass(module_cls, torch.nn.modules.lazy.LazyModuleMixin):
with torch.no_grad():
cpu_module(*cpu_forward_args, **cpu_forward_kwargs)
gpu_module(*gpu_forward_args, **gpu_forward_kwargs)
for cpu_p, gpu_p in zip(cpu_module.parameters(), gpu_module.parameters()):
gpu_p.data.copy_(cpu_p)
# === Compare forward output between cpu and gpu ===
cpu_outputs = cpu_module(*cpu_forward_args, **cpu_forward_kwargs)
gpu_outputs = gpu_module(*gpu_forward_args, **gpu_forward_kwargs)
self.assertEqual(cpu_outputs, gpu_outputs)
# === Run backwards on CPU and GPU and compare results ===
def check_backward(cpu_output, gpu_output):
cpu_grad_output = cpu_output.clone().normal_()
gpu_grad_output = cpu_grad_output.type_as(gpu_output)
cpu_output.backward(cpu_grad_output, retain_graph=True)
gpu_output.backward(gpu_grad_output, retain_graph=True)
cpu_grad_input = self._get_grads(cpu_forward_args)
gpu_grad_input = self._get_grads(gpu_forward_args)
self.assertEqual(cpu_grad_input, gpu_grad_input)
for cpu_p, gpu_p in zip(cpu_module.parameters(), gpu_module.parameters()):
self.assertEqual(cpu_p.grad, gpu_p.grad)
cpu_grad_kwarg_input = self._get_grads(cpu_forward_kwargs)
gpu_grad_kwarg_input = self._get_grads(gpu_forward_kwargs)
self.assertEqual(cpu_grad_kwarg_input, gpu_grad_kwarg_input)
for _ in range(5):
if isinstance(cpu_outputs, torch.Tensor):
check_backward(cpu_outputs, gpu_outputs)
else:
flatten_cpu_outputs = torch.utils._pytree.tree_leaves(cpu_outputs)
flatten_gpu_outputs = torch.utils._pytree.tree_leaves(gpu_outputs)
for cpu_output, gpu_output in zip(flatten_cpu_outputs, flatten_gpu_outputs):
if cpu_output.requires_grad:
check_backward(cpu_output, gpu_output)
|
from itertools import product
from inspect import signature, isgenerator
from copy import deepcopy
import tempfile
from operator import methodcaller
import torch
from torch.testing._internal.common_cuda import with_tf32_off
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, onlyCUDA, toleranceOverride, tol, skipMeta)
from torch.testing._internal.common_modules import module_db, modules, TrainEvalMode
from torch.testing._internal.common_utils import (
TestCase, run_tests, freeze_rng_state, mock_wrapper, get_tensors_from, gradcheck,
gradgradcheck, skipIfMps, skipIfTorchInductor)
from unittest.mock import patch, call
|
from itertools import chain, product
from inspect import signature, isgenerator
from copy import deepcopy
import tempfile
from operator import methodcaller
import torch
from torch._subclasses.meta_utils import assert_metadata_eq
from torch.testing._internal.common_cuda import with_tf32_off
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, onlyCPU, onlyCUDA, toleranceOverride, tol, skipMeta)
from torch.testing._internal.common_modules import module_db, modules, ModuleErrorEnum, TrainEvalMode
from torch.testing._internal.common_utils import (
TestCase, run_tests, freeze_rng_state, mock_wrapper, get_tensors_from, gradcheck,
gradgradcheck, parametrize, wrapSwapTensorsTest)
from unittest.mock import patch, call
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_modules.py
|
check_backward
|
def check_backward(cpu_output, gpu_output):
cpu_grad_output = cpu_output.clone().normal_()
gpu_grad_output = cpu_grad_output.type_as(gpu_output)
cpu_output.backward(cpu_grad_output, retain_graph=True)
gpu_output.backward(gpu_grad_output, retain_graph=True)
cpu_grad_input = self._get_grads(cpu_forward_args)
gpu_grad_input = self._get_grads(gpu_forward_args)
self.assertEqual(cpu_grad_input, gpu_grad_input)
for cpu_p, gpu_p in zip(cpu_module.parameters(), gpu_module.parameters()):
self.assertEqual(cpu_p.grad, gpu_p.grad)
cpu_grad_kwarg_input = self._get_grads(cpu_forward_kwargs)
gpu_grad_kwarg_input = self._get_grads(gpu_forward_kwargs)
self.assertEqual(cpu_grad_kwarg_input, gpu_grad_kwarg_input)
for _ in range(5):
if isinstance(cpu_outputs, torch.Tensor):
check_backward(cpu_outputs, gpu_outputs)
else:
flatten_cpu_outputs, _ = torch.utils._pytree.tree_flatten(cpu_outputs)
flatten_gpu_outputs, _ = torch.utils._pytree.tree_flatten(gpu_outputs)
for cpu_output, gpu_output in zip(flatten_cpu_outputs, flatten_gpu_outputs):
check_backward(cpu_output, gpu_output)
|
def check_backward(cpu_output, gpu_output):
cpu_grad_output = cpu_output.clone().normal_()
gpu_grad_output = cpu_grad_output.type_as(gpu_output)
cpu_output.backward(cpu_grad_output, retain_graph=True)
gpu_output.backward(gpu_grad_output, retain_graph=True)
cpu_grad_input = self._get_grads(cpu_forward_args)
gpu_grad_input = self._get_grads(gpu_forward_args)
self.assertEqual(cpu_grad_input, gpu_grad_input)
for cpu_p, gpu_p in zip(cpu_module.parameters(), gpu_module.parameters()):
self.assertEqual(cpu_p.grad, gpu_p.grad)
cpu_grad_kwarg_input = self._get_grads(cpu_forward_kwargs)
gpu_grad_kwarg_input = self._get_grads(gpu_forward_kwargs)
self.assertEqual(cpu_grad_kwarg_input, gpu_grad_kwarg_input)
for _ in range(5):
if isinstance(cpu_outputs, torch.Tensor):
check_backward(cpu_outputs, gpu_outputs)
else:
flatten_cpu_outputs = torch.utils._pytree.tree_leaves(cpu_outputs)
flatten_gpu_outputs = torch.utils._pytree.tree_leaves(gpu_outputs)
for cpu_output, gpu_output in zip(flatten_cpu_outputs, flatten_gpu_outputs):
if cpu_output.requires_grad:
check_backward(cpu_output, gpu_output)
|
from itertools import product
from inspect import signature, isgenerator
from copy import deepcopy
import tempfile
from operator import methodcaller
import torch
from torch.testing._internal.common_cuda import with_tf32_off
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, onlyCUDA, toleranceOverride, tol, skipMeta)
from torch.testing._internal.common_modules import module_db, modules, TrainEvalMode
from torch.testing._internal.common_utils import (
TestCase, run_tests, freeze_rng_state, mock_wrapper, get_tensors_from, gradcheck,
gradgradcheck, skipIfMps, skipIfTorchInductor)
from unittest.mock import patch, call
|
from itertools import chain, product
from inspect import signature, isgenerator
from copy import deepcopy
import tempfile
from operator import methodcaller
import torch
from torch._subclasses.meta_utils import assert_metadata_eq
from torch.testing._internal.common_cuda import with_tf32_off
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, onlyCPU, onlyCUDA, toleranceOverride, tol, skipMeta)
from torch.testing._internal.common_modules import module_db, modules, ModuleErrorEnum, TrainEvalMode
from torch.testing._internal.common_utils import (
TestCase, run_tests, freeze_rng_state, mock_wrapper, get_tensors_from, gradcheck,
gradgradcheck, parametrize, wrapSwapTensorsTest)
from unittest.mock import patch, call
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_modules.py
|
inner_to_mem_format
|
def inner_to_mem_format(obj):
d = obj.dim()
if ((mem_format == torch.channels_last and d != 4)
or (mem_format == torch.channels_last_3d and d != 5)):
return obj
return obj.to(memory_format=mem_format)
return self._traverse_obj(obj, inner_to_mem_format)
|
def inner_to_mem_format(obj):
d = obj.dim()
if ((mem_format == torch.channels_last and d != 4)
or (mem_format == torch.channels_last_3d and d != 5)):
return obj.clone().detach().requires_grad_(obj.requires_grad)
return obj.clone().to(memory_format=mem_format).detach().requires_grad_(obj.requires_grad)
return self._traverse_obj(obj, inner_to_mem_format)
|
from itertools import product
from inspect import signature, isgenerator
from copy import deepcopy
import tempfile
from operator import methodcaller
import torch
from torch.testing._internal.common_cuda import with_tf32_off
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, onlyCUDA, toleranceOverride, tol, skipMeta)
from torch.testing._internal.common_modules import module_db, modules, TrainEvalMode
from torch.testing._internal.common_utils import (
TestCase, run_tests, freeze_rng_state, mock_wrapper, get_tensors_from, gradcheck,
gradgradcheck, skipIfMps, skipIfTorchInductor)
from unittest.mock import patch, call
|
from itertools import chain, product
from inspect import signature, isgenerator
from copy import deepcopy
import tempfile
from operator import methodcaller
import torch
from torch._subclasses.meta_utils import assert_metadata_eq
from torch.testing._internal.common_cuda import with_tf32_off
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, onlyCPU, onlyCUDA, toleranceOverride, tol, skipMeta)
from torch.testing._internal.common_modules import module_db, modules, ModuleErrorEnum, TrainEvalMode
from torch.testing._internal.common_utils import (
TestCase, run_tests, freeze_rng_state, mock_wrapper, get_tensors_from, gradcheck,
gradgradcheck, parametrize, wrapSwapTensorsTest)
from unittest.mock import patch, call
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_modules.py
|
inner_check_out_mem_format
|
def inner_check_out_mem_format(output):
d = output.dim()
if (d == 4 and ((input_mem_format == torch.channels_last)
or (module_mem_format == torch.channels_last and module_memformat_affects_out))):
self.assertTrue(output.is_contiguous(memory_format=torch.channels_last))
elif (d == 5 and ((input_mem_format == torch.channels_last_3d)
or (module_mem_format == torch.channels_last_3d and module_memformat_affects_out))):
self.assertTrue(output.is_contiguous(memory_format=torch.channels_last_3d))
else:
self.assertTrue(output.is_contiguous())
return self._traverse_obj(output, inner_check_out_mem_format)
|
def inner_check_out_mem_format(output):
d = output.dim()
if (d == 4 and ((input_mem_format == torch.channels_last)
or (module_mem_format == torch.channels_last and module_memformat_affects_out))):
self.assertTrue(output.numel() == 0 or output.is_contiguous(memory_format=torch.channels_last))
elif (d == 5 and ((input_mem_format == torch.channels_last_3d)
or (module_mem_format == torch.channels_last_3d and module_memformat_affects_out))):
self.assertTrue(output.numel() == 0 or output.is_contiguous(memory_format=torch.channels_last_3d))
else:
self.assertTrue(output.is_contiguous())
return self._traverse_obj(output, inner_check_out_mem_format)
|
from itertools import product
from inspect import signature, isgenerator
from copy import deepcopy
import tempfile
from operator import methodcaller
import torch
from torch.testing._internal.common_cuda import with_tf32_off
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, onlyCUDA, toleranceOverride, tol, skipMeta)
from torch.testing._internal.common_modules import module_db, modules, TrainEvalMode
from torch.testing._internal.common_utils import (
TestCase, run_tests, freeze_rng_state, mock_wrapper, get_tensors_from, gradcheck,
gradgradcheck, skipIfMps, skipIfTorchInductor)
from unittest.mock import patch, call
|
from itertools import chain, product
from inspect import signature, isgenerator
from copy import deepcopy
import tempfile
from operator import methodcaller
import torch
from torch._subclasses.meta_utils import assert_metadata_eq
from torch.testing._internal.common_cuda import with_tf32_off
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, onlyCPU, onlyCUDA, toleranceOverride, tol, skipMeta)
from torch.testing._internal.common_modules import module_db, modules, ModuleErrorEnum, TrainEvalMode
from torch.testing._internal.common_utils import (
TestCase, run_tests, freeze_rng_state, mock_wrapper, get_tensors_from, gradcheck,
gradgradcheck, parametrize, wrapSwapTensorsTest)
from unittest.mock import patch, call
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_modules.py
|
_to
|
def _to(m, set_grad=False):
for c in m.children():
_to(c, set_grad=set_grad)
for n, p in m.named_parameters(recurse=False):
new_p = torch.nn.Parameter(p.detach().clone().to(device, dtype))
setattr(m, n, new_p)
if set_grad:
new_p.grad = torch.randn_like(new_p)
for n, b in m.named_buffers(recurse=False):
new_b = b.detach().clone().to(device, dtype)
setattr(m, n, new_b)
_to(m, set_grad=set_grad)
# Check .to() can be run after forward and backward with swap
has_params = len(list(m.parameters())) > 0
if swap and not set_grad and has_params:
out = m(*args, **kwargs)
if isinstance(out, tuple):
out = out[0]
out.sum().backward()
m.to(dtype=torch.half)
# reset
m.to(dtype=torch.float32)
prev_device, prev_dtype = device, dtype
for device_, dtype_ in product(devices, dtypes):
# if device/dtype do not change, grad.to(device, dtype) is a no-op so
# swapping will not change ._cdata
# parameters will be wrapped in an nn.Parameter before swapping
# which will cause the ._cdata to change
g_no_swap = device_ == prev_device and dtype_ == prev_dtype
prev_prev_device, prev_prev_dtype = prev_device, prev_dtype
prev_device, prev_dtype = device_, dtype_
p_ids_before = [id(p) for p in m.parameters()]
p_cdatas_before = [p._cdata for p in m.parameters()]
if set_grad:
g_ids_before = [id(p.grad) for p in m.parameters()]
g_cdatas_before = [p.grad._cdata for p in m.parameters()]
m.to(device=device_, dtype=dtype_)
self.assertTrue(all(isinstance(p, torch.nn.Parameter) for p in m.parameters()))
self.assertTrue(all(p.device.type == device_ for p in m.parameters()))
self.assertTrue(all(p.dtype == dtype_ for p in m.parameters()))
p_ids_after = [id(p) for p in m.parameters()]
p_cdatas_after = [p._cdata for p in m.parameters()]
if set_grad:
self.assertTrue(all(p.grad.device.type == device_ for p in m.parameters()))
self.assertTrue(all(p.grad.dtype == dtype_ for p in m.parameters()))
g_ids_after = [id(p.grad) for p in m.parameters()]
g_cdatas_after = [p.grad._cdata for p in m.parameters()]
if swap:
# id same, ._cdata differs --> swapped cdata of THPVariable
self.assertTrue(all(a == b for a, b in zip(p_ids_before, p_ids_after)))
self.assertTrue(all(a != b for a, b in zip(p_cdatas_before, p_cdatas_after)))
if set_grad:
self.assertTrue(
all(a == b if g_no_swap else a != b for a, b in zip(g_cdatas_before, g_cdatas_after)))
else:
# id and _cdata remain the same --> .data setting
self.assertTrue(all(a == b for a, b in zip(p_cdatas_before, p_cdatas_after)))
self.assertTrue(all(a == b for a, b in zip(p_ids_before, p_ids_after)))
if set_grad:
self.assertTrue(all(a == b for a, b in zip(g_cdatas_before, g_cdatas_after)))
self.assertTrue(all(a == b for a, b in zip(g_ids_before, g_ids_after)))
|
from itertools import chain, product
from inspect import signature, isgenerator
from copy import deepcopy
import tempfile
from operator import methodcaller
import torch
from torch._subclasses.meta_utils import assert_metadata_eq
from torch.testing._internal.common_cuda import with_tf32_off
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, onlyCPU, onlyCUDA, toleranceOverride, tol, skipMeta)
from torch.testing._internal.common_modules import module_db, modules, ModuleErrorEnum, TrainEvalMode
from torch.testing._internal.common_utils import (
TestCase, run_tests, freeze_rng_state, mock_wrapper, get_tensors_from, gradcheck,
gradgradcheck, parametrize, wrapSwapTensorsTest)
from unittest.mock import patch, call
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_modules.py
|
test_to_empty
|
if __name__ == '__main__':
run_tests()
|
def test_to_empty(self, device, dtype, module_info, swap, training):
    """Check Module.to_empty() materializes meta-device parameters on the target device.

    With swap_module_params_on_conversion(True) the Parameter objects must be
    preserved (same id) while their underlying C++ tensor (._cdata) is swapped;
    with it off, meta->device conversion builds brand-new Parameters, so both
    id and ._cdata change.
    """
    module_cls = module_info.module_cls
    with torch.device("meta"):
        module_inputs = module_info.module_inputs_func(module_info, device=None, dtype=dtype,
                                                       requires_grad=False, training=training)
    torch.__future__.set_swap_module_params_on_conversion(swap)
    target_device = torch.device(device)
    for module_input in module_inputs:
        ctor = module_input.constructor_input
        # Construct on meta so to_empty() has real conversion work to do.
        with torch.device("meta"):
            m = module_cls(*ctor.args, **ctor.kwargs)
        ids_before = [id(p) for p in m.parameters()]
        cdatas_before = [p._cdata for p in m.parameters()]
        m.to_empty(device=target_device)
        params = list(m.parameters())
        self.assertTrue(all(isinstance(p, torch.nn.Parameter) for p in params))
        self.assertTrue(all(p.device == target_device for p in params))
        self.assertTrue(all(p.dtype == dtype for p in params))
        ids_after = [id(p) for p in params]
        cdatas_after = [p._cdata for p in params]
        if swap:
            # id same, ._cdata differs --> swapped cdata of THPVariable
            self.assertTrue(all(a == b for a, b in zip(ids_before, ids_after)))
            self.assertTrue(all(a != b for a, b in zip(cdatas_before, cdatas_after)))
        else:
            # id and ._cdata both differ: meta and device tensors have different
            # shallow-copy types, so a new parameter is created and assigned.
            self.assertTrue(all(a != b for a, b in zip(ids_before, ids_after)))
            self.assertTrue(all(a != b for a, b in zip(cdatas_before, cdatas_after)))
|
from itertools import chain, product
from inspect import signature, isgenerator
from copy import deepcopy
import tempfile
from operator import methodcaller
import torch
from torch._subclasses.meta_utils import assert_metadata_eq
from torch.testing._internal.common_cuda import with_tf32_off
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, onlyCPU, onlyCUDA, toleranceOverride, tol, skipMeta)
from torch.testing._internal.common_modules import module_db, modules, ModuleErrorEnum, TrainEvalMode
from torch.testing._internal.common_utils import (
TestCase, run_tests, freeze_rng_state, mock_wrapper, get_tensors_from, gradcheck,
gradgradcheck, parametrize, wrapSwapTensorsTest)
from unittest.mock import patch, call
class TestModule(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/test_mps.py
|
wrapper
|
def wrapper(func):
    # Mark the test as an expected failure only when `condition` (a free
    # variable from the enclosing decorator factory) holds; otherwise hand
    # the function back unchanged.
    return unittest.expectedFailure(func) if condition else func
return wrapper
|
import io
import platform
import sys
import math
import random
import unittest
import warnings
import subprocess
import tempfile
import os
import copy
import gc
import threading
import torch
import torch.nn as nn
import torch.nn.functional as F
import itertools
from collections import defaultdict
from torch import inf
from torch.nn import Buffer, Parameter
from torch.testing._internal import opinfo
from torch.testing._internal.common_utils import \
(gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI,
NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import get_all_dtypes, integral_types
import torch.backends.mps
from torch.distributions import Uniform, Exponential
from functools import partial
from torch.testing._internal.common_methods_invocations import (
op_db,
DecorateInfo,
UnaryUfuncInfo,
ReductionOpInfo,
SpectralFuncInfo,
BinaryUfuncInfo,
)
from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
import numpy as np
import torch
import torch.utils._pytree as pytree
from itertools import product
import operator
test_consistency_op_db = copy.deepcopy(op_db)
test_error_inputs_op_db = copy.deepcopy(op_db)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
import numpy as np
import numpy as np
import numpy as np
import torch
from torch.utils.checkpoint import checkpoint
import numpy as np
from torch.serialization import SourceChangeWarning
from torch.serialization import SourceChangeWarning
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_mps.py
|
xfailIfMacOS14_4Plus
|
def xfailIfMacOS14_4Plus(func):
    """Expected-failure decorator for tests known broken on macOS 14.4+."""
    if product_version > 14.3:  # noqa: F821
        return unittest.expectedFailure(func)
    return func
|
import io
import platform
import sys
import math
import random
import unittest
import warnings
import subprocess
import tempfile
import os
import copy
import gc
import threading
import torch
import torch.nn as nn
import torch.nn.functional as F
import itertools
from collections import defaultdict
from torch import inf
from torch.nn import Buffer, Parameter
from torch.testing._internal import opinfo
from torch.testing._internal.common_utils import \
(gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI,
NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import get_all_dtypes, integral_types
import torch.backends.mps
from torch.distributions import Uniform, Exponential
from functools import partial
from torch.testing._internal.common_methods_invocations import (
op_db,
DecorateInfo,
UnaryUfuncInfo,
ReductionOpInfo,
SpectralFuncInfo,
BinaryUfuncInfo,
)
from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
import numpy as np
import torch
import torch.utils._pytree as pytree
from itertools import product
import operator
test_consistency_op_db = copy.deepcopy(op_db)
test_error_inputs_op_db = copy.deepcopy(op_db)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
import numpy as np
import numpy as np
import numpy as np
import torch
from torch.utils.checkpoint import checkpoint
import numpy as np
from torch.serialization import SourceChangeWarning
from torch.serialization import SourceChangeWarning
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_mps.py
|
test_pixel_shuffle_unshuffle_1D
|
def test_pixel_shuffle_unshuffle_1D():
    """Run pixel shuffle/unshuffle checks for 1-D inputs (an error case)."""
    _test_pixel_shuffle_unshuffle_for_input_dims(num_input_dims=1)
|
import io
import platform
import sys
import math
import random
import unittest
import warnings
import subprocess
import tempfile
import os
import copy
import gc
import threading
import torch
import torch.nn as nn
import torch.nn.functional as F
import itertools
from collections import defaultdict
from torch import inf
from torch.nn import Buffer, Parameter
from torch.testing._internal import opinfo
from torch.testing._internal.common_utils import \
(gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI,
NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import get_all_dtypes, integral_types
import torch.backends.mps
from torch.distributions import Uniform, Exponential
from functools import partial
from torch.testing._internal.common_methods_invocations import (
op_db,
DecorateInfo,
UnaryUfuncInfo,
ReductionOpInfo,
SpectralFuncInfo,
BinaryUfuncInfo,
)
from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
import numpy as np
import torch
import torch.utils._pytree as pytree
from itertools import product
import operator
test_consistency_op_db = copy.deepcopy(op_db)
test_error_inputs_op_db = copy.deepcopy(op_db)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1)
total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"]))
TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1'
import numpy as np
import numpy as np
import numpy as np
import torch
from torch.utils.checkpoint import checkpoint
import numpy as np
from torch.serialization import SourceChangeWarning
from torch.serialization import SourceChangeWarning
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_mps.py
|
test_pixel_shuffle_unshuffle_2D
|
def test_pixel_shuffle_unshuffle_2D():
    """Run pixel shuffle/unshuffle checks for 2-D inputs (an error case)."""
    _test_pixel_shuffle_unshuffle_for_input_dims(num_input_dims=2)
|
import io
import platform
import sys
import math
import random
import unittest
import warnings
import subprocess
import tempfile
import os
import copy
import gc
import threading
import torch
import torch.nn as nn
import torch.nn.functional as F
import itertools
from collections import defaultdict
from torch import inf
from torch.nn import Buffer, Parameter
from torch.testing._internal import opinfo
from torch.testing._internal.common_utils import \
(gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI,
NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import get_all_dtypes, integral_types
import torch.backends.mps
from torch.distributions import Uniform, Exponential
from functools import partial
from torch.testing._internal.common_methods_invocations import (
op_db,
DecorateInfo,
UnaryUfuncInfo,
ReductionOpInfo,
SpectralFuncInfo,
BinaryUfuncInfo,
)
from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
import numpy as np
import torch
import torch.utils._pytree as pytree
from itertools import product
import operator
test_consistency_op_db = copy.deepcopy(op_db)
test_error_inputs_op_db = copy.deepcopy(op_db)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1)
total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"]))
TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1'
import numpy as np
import numpy as np
import numpy as np
import torch
from torch.utils.checkpoint import checkpoint
import numpy as np
from torch.serialization import SourceChangeWarning
from torch.serialization import SourceChangeWarning
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_mps.py
|
test_pixel_shuffle_unshuffle_3D
|
def test_pixel_shuffle_unshuffle_3D():
    """Run pixel shuffle/unshuffle checks for 3-D inputs (a success case)."""
    _test_pixel_shuffle_unshuffle_for_input_dims(num_input_dims=3)
|
import io
import platform
import sys
import math
import random
import unittest
import warnings
import subprocess
import tempfile
import os
import copy
import gc
import threading
import torch
import torch.nn as nn
import torch.nn.functional as F
import itertools
from collections import defaultdict
from torch import inf
from torch.nn import Buffer, Parameter
from torch.testing._internal import opinfo
from torch.testing._internal.common_utils import \
(gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI,
NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import get_all_dtypes, integral_types
import torch.backends.mps
from torch.distributions import Uniform, Exponential
from functools import partial
from torch.testing._internal.common_methods_invocations import (
op_db,
DecorateInfo,
UnaryUfuncInfo,
ReductionOpInfo,
SpectralFuncInfo,
BinaryUfuncInfo,
)
from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
import numpy as np
import torch
import torch.utils._pytree as pytree
from itertools import product
import operator
test_consistency_op_db = copy.deepcopy(op_db)
test_error_inputs_op_db = copy.deepcopy(op_db)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1)
total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"]))
TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1'
import numpy as np
import numpy as np
import numpy as np
import torch
from torch.utils.checkpoint import checkpoint
import numpy as np
from torch.serialization import SourceChangeWarning
from torch.serialization import SourceChangeWarning
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_mps.py
|
test_pixel_shuffle_unshuffle_4D
|
def test_pixel_shuffle_unshuffle_4D():
    """Run pixel shuffle/unshuffle checks for 4-D inputs (a success case)."""
    _test_pixel_shuffle_unshuffle_for_input_dims(num_input_dims=4)
|
import io
import platform
import sys
import math
import random
import unittest
import warnings
import subprocess
import tempfile
import os
import copy
import gc
import threading
import torch
import torch.nn as nn
import torch.nn.functional as F
import itertools
from collections import defaultdict
from torch import inf
from torch.nn import Buffer, Parameter
from torch.testing._internal import opinfo
from torch.testing._internal.common_utils import \
(gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI,
NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import get_all_dtypes, integral_types
import torch.backends.mps
from torch.distributions import Uniform, Exponential
from functools import partial
from torch.testing._internal.common_methods_invocations import (
op_db,
DecorateInfo,
UnaryUfuncInfo,
ReductionOpInfo,
SpectralFuncInfo,
BinaryUfuncInfo,
)
from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
import numpy as np
import torch
import torch.utils._pytree as pytree
from itertools import product
import operator
test_consistency_op_db = copy.deepcopy(op_db)
test_error_inputs_op_db = copy.deepcopy(op_db)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1)
total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"]))
TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1'
import numpy as np
import numpy as np
import numpy as np
import torch
from torch.utils.checkpoint import checkpoint
import numpy as np
from torch.serialization import SourceChangeWarning
from torch.serialization import SourceChangeWarning
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_mps.py
|
test_copy_cast_no_leak
|
def test_copy_cast_no_leak(self):
    """Round-trip a tensor mps->cpu->mps with dtype casts and verify the MPS
    driver-allocated memory is unchanged afterwards (i.e. no leak)."""
    def step(x):
        # Results are deliberately discarded; only allocator side effects matter.
        x = x.to(device='cpu', dtype=torch.float32)
        x = x.to(device='mps', dtype=torch.float16)
    a = torch.randn(128, 128, device='mps', dtype=torch.float16)
    # Warm up / prebuild MPS shaders (otherwise check fails on 13.2)
    step(a)
    torch.mps.empty_cache()
    driver_before = torch.mps.driver_allocated_memory()
    # A second identical pass must not grow the driver allocation.
    step(a)
    torch.mps.empty_cache()
    driver_after = torch.mps.driver_allocated_memory()
    self.assertEqual(driver_before, driver_after, f"Detected {driver_after-driver_before} bytes leak of GPU memory")
|
import io
import platform
import sys
import math
import random
import unittest
import warnings
import subprocess
import tempfile
import os
import copy
import gc
import threading
import torch
import torch.nn as nn
import torch.nn.functional as F
import itertools
from collections import defaultdict
from torch import inf
from torch.nn import Buffer, Parameter
from torch.testing._internal import opinfo
from torch.testing._internal.common_utils import \
(gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI,
NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import get_all_dtypes, integral_types
import torch.backends.mps
from torch.distributions import Uniform, Exponential
from functools import partial
from torch.testing._internal.common_methods_invocations import (
op_db,
DecorateInfo,
UnaryUfuncInfo,
ReductionOpInfo,
SpectralFuncInfo,
BinaryUfuncInfo,
)
from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
import numpy as np
import torch
import torch.utils._pytree as pytree
from itertools import product
import operator
test_consistency_op_db = copy.deepcopy(op_db)
test_error_inputs_op_db = copy.deepcopy(op_db)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1)
total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"]))
TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1'
class TestMemoryLeak(TestCaseMPS):
import numpy as np
import numpy as np
import numpy as np
import torch
from torch.utils.checkpoint import checkpoint
import numpy as np
from torch.serialization import SourceChangeWarning
from torch.serialization import SourceChangeWarning
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_mps.py
|
step
|
def step(x):
    # Round-trip copy with dtype conversion; the converted tensors are
    # discarded on purpose -- the caller only measures allocator side effects.
    x = x.to(device='cpu', dtype=torch.float32)
    x = x.to(device='mps', dtype=torch.float16)
a = torch.randn(128, 128, device='mps', dtype=torch.float16)
# Warm up / prebuild MPS shaders (otherwise check fails on 13.2)
step(a)
torch.mps.empty_cache()
driver_before = torch.mps.driver_allocated_memory()
step(a)
torch.mps.empty_cache()
driver_after = torch.mps.driver_allocated_memory()
self.assertEqual(driver_before, driver_after, f"Detected {driver_after-driver_before} bytes leak of GPU memory")
|
import io
import platform
import sys
import math
import random
import unittest
import warnings
import subprocess
import tempfile
import os
import copy
import gc
import threading
import torch
import torch.nn as nn
import torch.nn.functional as F
import itertools
from collections import defaultdict
from torch import inf
from torch.nn import Buffer, Parameter
from torch.testing._internal import opinfo
from torch.testing._internal.common_utils import \
(gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI,
NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import get_all_dtypes, integral_types
import torch.backends.mps
from torch.distributions import Uniform, Exponential
from functools import partial
from torch.testing._internal.common_methods_invocations import (
op_db,
DecorateInfo,
UnaryUfuncInfo,
ReductionOpInfo,
SpectralFuncInfo,
BinaryUfuncInfo,
)
from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
import numpy as np
import torch
import torch.utils._pytree as pytree
from itertools import product
import operator
test_consistency_op_db = copy.deepcopy(op_db)
test_error_inputs_op_db = copy.deepcopy(op_db)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1)
total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"]))
TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1'
import numpy as np
import numpy as np
import numpy as np
import torch
from torch.utils.checkpoint import checkpoint
import numpy as np
from torch.serialization import SourceChangeWarning
from torch.serialization import SourceChangeWarning
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_mps.py
|
test_pixel_shuffle_unshuffle
|
def test_pixel_shuffle_unshuffle(self):
    """End-to-end checks for nn.PixelShuffle / nn.PixelUnshuffle on MPS.

    For 3-D to 5-D inputs with valid dimensions, verifies element placement
    imperatively, checks the backward pass, and confirms unshuffle inverts
    shuffle; for 1-D/2-D inputs and for invalid channel/height/width or
    non-positive scale factors, expects a RuntimeError.
    """
    def _test_pixel_shuffle_unshuffle_helper(num_input_dims, valid_channels_dim=True,
                                             upscale_factor=None, is_contiguous=True):
        # One shuffle/unshuffle round trip for a randomly-shaped input.
        def generate_input():
            # If valid_channels_dim=False, add 1 to make channels dim indivisible by upscale_factor ** 2.
            channels = random.randint(1, 4) * upscale_factor ** 2 + (0 if valid_channels_dim else 1)
            height = random.randint(5, 10)
            width = random.randint(5, 10)
            if num_input_dims == 1:
                input = torch.rand(channels, requires_grad=True, device='mps')
                assert is_contiguous
            elif num_input_dims == 2:
                # .T yields a non-contiguous view; made contiguous on demand below.
                input = torch.rand(width, height, requires_grad=True, device='mps').T
                if is_contiguous:
                    input = input.contiguous()
            else:
                batch_sizes = [random.randint(1, 3) for _ in range(num_input_dims - 3)]
                input = torch.rand(*batch_sizes, channels, width, height, requires_grad=True, device='mps')
                input = input.transpose(-1, -2)
                if is_contiguous:
                    input = input.contiguous()
            if not is_contiguous and len(input.reshape(-1)) > 0:
                assert not input.is_contiguous()
            # Re-detach so the returned tensor is a fresh leaf with requires_grad set.
            input = input.detach().clone()
            input.requires_grad = True
            return input
        # Function to imperatively ensure pixels are shuffled to the correct locations.
        # Used to validate the batch operations in pixel_shuffle.
        def _verify_pixel_shuffle(input, output, upscale_factor):
            for c in range(output.size(-3)):
                for h in range(output.size(-2)):
                    for w in range(output.size(-1)):
                        height_idx = h // upscale_factor
                        weight_idx = w // upscale_factor
                        channel_idx = (upscale_factor * (h % upscale_factor)) + (w % upscale_factor) + \
                                      (c * upscale_factor ** 2)
                        self.assertEqual(output[..., c, h, w], input[..., channel_idx, height_idx, weight_idx])
        upscale_factor = random.randint(2, 5) if upscale_factor is None else upscale_factor
        input = generate_input()
        ps = nn.PixelShuffle(upscale_factor)
        pus = nn.PixelUnshuffle(downscale_factor=upscale_factor)
        if num_input_dims >= 3 and valid_channels_dim and upscale_factor > 0:
            output = ps(input)
            _verify_pixel_shuffle(input, output, upscale_factor)
            # pixel_shuffle only rearranges elements, so grad == upstream grad.
            output.backward(output.data)
            self.assertEqual(input.data, input.grad.data)
            # Ensure unshuffle properly inverts shuffle.
            unshuffle_output = pus(output)
            self.assertEqual(input, unshuffle_output)
        else:
            self.assertRaises(RuntimeError, lambda: ps(input))
    # Error-path helper: every combination below must raise from PixelUnshuffle.
    def _test_pixel_unshuffle_error_case_helper(num_input_dims, valid_height_dim=True, valid_width_dim=True,
                                                downscale_factor=None):
        downscale_factor = random.randint(2, 5) if downscale_factor is None else downscale_factor
        channels = random.randint(1, 4)
        # If valid_height_dim=False, add 1 to make height dim indivisible by downscale_factor.
        height = random.randint(3, 5) * abs(downscale_factor) + (0 if valid_height_dim else 1)
        # If valid_width_dim=False, add 1 to make width dim indivisible by downscale_factor.
        width = random.randint(3, 5) * abs(downscale_factor) + (0 if valid_width_dim else 1)
        if num_input_dims == 1:
            input = torch.rand(channels, requires_grad=True, device='mps')
        elif num_input_dims == 2:
            input = torch.rand(height, width, requires_grad=True, device='mps')
        else:
            batch_sizes = [random.randint(1, 3) for _ in range(num_input_dims - 3)]
            input = torch.rand(*batch_sizes, channels, height, width, requires_grad=True, device='mps')
        pus = nn.PixelUnshuffle(downscale_factor)
        self.assertRaises(RuntimeError, lambda: pus(input))
    def _test_pixel_shuffle_unshuffle_for_input_dims(num_input_dims):
        # For 1D - 2D, this is an error case.
        # For 3D - 5D, this is a success case for pixel_shuffle + pixel_unshuffle.
        is_contiguous_check = [True, False] if num_input_dims > 1 else [True]
        for is_contiguous in is_contiguous_check:
            _test_pixel_shuffle_unshuffle_helper(
                num_input_dims=num_input_dims, is_contiguous=is_contiguous
            )
            _test_pixel_shuffle_unshuffle_helper(
                num_input_dims=num_input_dims, valid_channels_dim=False, is_contiguous=is_contiguous
            )
            _test_pixel_shuffle_unshuffle_helper(
                num_input_dims=num_input_dims, upscale_factor=0, is_contiguous=is_contiguous
            )
            _test_pixel_shuffle_unshuffle_helper(
                num_input_dims=num_input_dims, upscale_factor=-2, is_contiguous=is_contiguous
            )
            # Error cases for pixel_unshuffle.
            _test_pixel_unshuffle_error_case_helper(num_input_dims=num_input_dims, valid_height_dim=False)
            _test_pixel_unshuffle_error_case_helper(num_input_dims=num_input_dims, valid_width_dim=False)
            _test_pixel_unshuffle_error_case_helper(num_input_dims=num_input_dims, downscale_factor=0)
            _test_pixel_unshuffle_error_case_helper(num_input_dims=num_input_dims, downscale_factor=-2)
    def test_pixel_shuffle_unshuffle_1D():
        _test_pixel_shuffle_unshuffle_for_input_dims(num_input_dims=1)
    def test_pixel_shuffle_unshuffle_2D():
        _test_pixel_shuffle_unshuffle_for_input_dims(num_input_dims=2)
    def test_pixel_shuffle_unshuffle_3D():
        _test_pixel_shuffle_unshuffle_for_input_dims(num_input_dims=3)
    def test_pixel_shuffle_unshuffle_4D():
        _test_pixel_shuffle_unshuffle_for_input_dims(num_input_dims=4)
    def test_pixel_shuffle_unshuffle_5D():
        _test_pixel_shuffle_unshuffle_for_input_dims(num_input_dims=5)
    test_pixel_shuffle_unshuffle_1D()
    test_pixel_shuffle_unshuffle_2D()
    test_pixel_shuffle_unshuffle_3D()
    test_pixel_shuffle_unshuffle_4D()
    test_pixel_shuffle_unshuffle_5D()
|
import io
import platform
import sys
import math
import random
import unittest
import warnings
import subprocess
import tempfile
import os
import copy
import gc
import threading
import torch
import torch.nn as nn
import torch.nn.functional as F
import itertools
from collections import defaultdict
from torch import inf
from torch.nn import Buffer, Parameter
from torch.testing._internal import opinfo
from torch.testing._internal.common_utils import \
(gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI,
NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import get_all_dtypes, integral_types
import torch.backends.mps
from torch.distributions import Uniform, Exponential
from functools import partial
from torch.testing._internal.common_methods_invocations import (
op_db,
DecorateInfo,
UnaryUfuncInfo,
ReductionOpInfo,
SpectralFuncInfo,
BinaryUfuncInfo,
)
from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
import numpy as np
import torch
import torch.utils._pytree as pytree
from itertools import product
import operator
test_consistency_op_db = copy.deepcopy(op_db)
test_error_inputs_op_db = copy.deepcopy(op_db)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1)
total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"]))
TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1'
class TestPixelShuffle(TestCaseMPS):
import numpy as np
import numpy as np
import numpy as np
import torch
from torch.utils.checkpoint import checkpoint
import numpy as np
from torch.serialization import SourceChangeWarning
from torch.serialization import SourceChangeWarning
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_mps.py
|
generate_input
|
def generate_input():
    # Build a random test tensor on the 'mps' device; num_input_dims,
    # valid_channels_dim, upscale_factor, and is_contiguous are free
    # variables supplied by the enclosing helper.
    # If valid_channels_dim=False, add 1 to make channels dim indivisible by upscale_factor ** 2.
    channels = random.randint(1, 4) * upscale_factor ** 2 + (0 if valid_channels_dim else 1)
    height = random.randint(5, 10)
    width = random.randint(5, 10)
    if num_input_dims == 1:
        input = torch.rand(channels, requires_grad=True, device='mps')
        assert is_contiguous
    elif num_input_dims == 2:
        # .T yields a non-contiguous view; made contiguous on demand below.
        input = torch.rand(width, height, requires_grad=True, device='mps').T
        if is_contiguous:
            input = input.contiguous()
    else:
        batch_sizes = [random.randint(1, 3) for _ in range(num_input_dims - 3)]
        input = torch.rand(*batch_sizes, channels, width, height, requires_grad=True, device='mps')
        input = input.transpose(-1, -2)
        if is_contiguous:
            input = input.contiguous()
    if not is_contiguous and len(input.reshape(-1)) > 0:
        assert not input.is_contiguous()
    # Re-detach so the returned tensor is a fresh leaf with requires_grad set.
    input = input.detach().clone()
    input.requires_grad = True
    return input
# Function to imperatively ensure pixels are shuffled to the correct locations.
# Used to validate the batch operations in pixel_shuffle.
|
import io
import platform
import sys
import math
import random
import unittest
import warnings
import subprocess
import tempfile
import os
import copy
import gc
import threading
import torch
import torch.nn as nn
import torch.nn.functional as F
import itertools
from collections import defaultdict
from torch import inf
from torch.nn import Buffer, Parameter
from torch.testing._internal import opinfo
from torch.testing._internal.common_utils import \
(gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI,
NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import get_all_dtypes, integral_types
import torch.backends.mps
from torch.distributions import Uniform, Exponential
from functools import partial
from torch.testing._internal.common_methods_invocations import (
op_db,
DecorateInfo,
UnaryUfuncInfo,
ReductionOpInfo,
SpectralFuncInfo,
BinaryUfuncInfo,
)
from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
import numpy as np
import torch
import torch.utils._pytree as pytree
from itertools import product
import operator
test_consistency_op_db = copy.deepcopy(op_db)
test_error_inputs_op_db = copy.deepcopy(op_db)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1)
total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"]))
TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1'
import numpy as np
import numpy as np
import numpy as np
import torch
from torch.utils.checkpoint import checkpoint
import numpy as np
from torch.serialization import SourceChangeWarning
from torch.serialization import SourceChangeWarning
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_mps.py
|
_verify_pixel_shuffle
|
def _verify_pixel_shuffle(input, output, upscale_factor):
for c in range(output.size(-3)):
for h in range(output.size(-2)):
for w in range(output.size(-1)):
height_idx = h // upscale_factor
weight_idx = w // upscale_factor
channel_idx = (upscale_factor * (h % upscale_factor)) + (w % upscale_factor) + \
(c * upscale_factor ** 2)
self.assertEqual(output[..., c, h, w], input[..., channel_idx, height_idx, weight_idx])
upscale_factor = random.randint(2, 5) if upscale_factor is None else upscale_factor
input = generate_input()
ps = nn.PixelShuffle(upscale_factor)
pus = nn.PixelUnshuffle(downscale_factor=upscale_factor)
if num_input_dims >= 3 and valid_channels_dim and upscale_factor > 0:
output = ps(input)
_verify_pixel_shuffle(input, output, upscale_factor)
output.backward(output.data)
self.assertEqual(input.data, input.grad.data)
# Ensure unshuffle properly inverts shuffle.
unshuffle_output = pus(output)
self.assertEqual(input, unshuffle_output)
else:
self.assertRaises(RuntimeError, lambda: ps(input))
|
import io
import platform
import sys
import math
import random
import unittest
import warnings
import subprocess
import tempfile
import os
import copy
import gc
import threading
import torch
import torch.nn as nn
import torch.nn.functional as F
import itertools
from collections import defaultdict
from torch import inf
from torch.nn import Buffer, Parameter
from torch.testing._internal import opinfo
from torch.testing._internal.common_utils import \
(gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI,
NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import get_all_dtypes, integral_types
import torch.backends.mps
from torch.distributions import Uniform, Exponential
from functools import partial
from torch.testing._internal.common_methods_invocations import (
op_db,
DecorateInfo,
UnaryUfuncInfo,
ReductionOpInfo,
SpectralFuncInfo,
BinaryUfuncInfo,
)
from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
import numpy as np
import torch
import torch.utils._pytree as pytree
from itertools import product
import operator
test_consistency_op_db = copy.deepcopy(op_db)
test_error_inputs_op_db = copy.deepcopy(op_db)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1)
total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"]))
TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1'
import numpy as np
import numpy as np
import numpy as np
import torch
from torch.utils.checkpoint import checkpoint
import numpy as np
from torch.serialization import SourceChangeWarning
from torch.serialization import SourceChangeWarning
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_mps.py
|
_test_pixel_shuffle_unshuffle_for_input_dims
|
def _test_pixel_shuffle_unshuffle_for_input_dims(num_input_dims):
# For 1D - 2D, this is an error case.
# For 3D - 5D, this is a success case for pixel_shuffle + pixel_unshuffle.
is_contiguous_check = [True, False] if num_input_dims > 1 else [True]
for is_contiguous in is_contiguous_check:
_test_pixel_shuffle_unshuffle_helper(
num_input_dims=num_input_dims, is_contiguous=is_contiguous
)
_test_pixel_shuffle_unshuffle_helper(
num_input_dims=num_input_dims, valid_channels_dim=False, is_contiguous=is_contiguous
)
_test_pixel_shuffle_unshuffle_helper(
num_input_dims=num_input_dims, upscale_factor=0, is_contiguous=is_contiguous
)
_test_pixel_shuffle_unshuffle_helper(
num_input_dims=num_input_dims, upscale_factor=-2, is_contiguous=is_contiguous
)
# Error cases for pixel_unshuffle.
_test_pixel_unshuffle_error_case_helper(num_input_dims=num_input_dims, valid_height_dim=False)
_test_pixel_unshuffle_error_case_helper(num_input_dims=num_input_dims, valid_width_dim=False)
_test_pixel_unshuffle_error_case_helper(num_input_dims=num_input_dims, downscale_factor=0)
_test_pixel_unshuffle_error_case_helper(num_input_dims=num_input_dims, downscale_factor=-2)
|
import io
import platform
import sys
import math
import random
import unittest
import warnings
import subprocess
import tempfile
import os
import copy
import gc
import threading
import torch
import torch.nn as nn
import torch.nn.functional as F
import itertools
from collections import defaultdict
from torch import inf
from torch.nn import Buffer, Parameter
from torch.testing._internal import opinfo
from torch.testing._internal.common_utils import \
(gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI,
NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import get_all_dtypes, integral_types
import torch.backends.mps
from torch.distributions import Uniform, Exponential
from functools import partial
from torch.testing._internal.common_methods_invocations import (
op_db,
DecorateInfo,
UnaryUfuncInfo,
ReductionOpInfo,
SpectralFuncInfo,
BinaryUfuncInfo,
)
from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
import numpy as np
import torch
import torch.utils._pytree as pytree
from itertools import product
import operator
test_consistency_op_db = copy.deepcopy(op_db)
test_error_inputs_op_db = copy.deepcopy(op_db)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1)
total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"]))
TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1'
import numpy as np
import numpy as np
import numpy as np
import torch
from torch.utils.checkpoint import checkpoint
import numpy as np
from torch.serialization import SourceChangeWarning
from torch.serialization import SourceChangeWarning
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_mps.py
|
mps_ops_grad_modifier
|
def mps_ops_grad_modifier(ops):
XFAILLIST_GRAD = {
# precision issues
'special.polygammaspecial_polygamma_n_0': [torch.float16],
'polygammapolygamma_n_0': [torch.float16],
'nn.functional.binary_cross_entropy': [torch.float16],
# Unimplemented ops
'__getitem__': [torch.float16],
'_segment_reduce': [torch.float16, torch.float32],
'_chunk_cat': [torch.float16, torch.float32],
'unfold_copy': [torch.float16, torch.float32], # unfold_backward is not implemented
'unfold': [torch.float16, torch.float32],
'sparse.mmreduce': [torch.float32], # csr not supported
'unique_consecutive': [torch.float16, torch.float32],
'special_modified_bessel_i0': [torch.float16, torch.float32],
'scalar_tensor': [torch.float16, torch.float32],
'cdist': [torch.float32],
'masked.scatter': [torch.float16, torch.float32],
'index_fill': [torch.float16, torch.float32], # missing `aten::_unique`.
'linalg.lu_factor': [torch.float16, torch.float32], # missing `aten::lu_unpack`.
'aminmax': [torch.float32, torch.float16],
# Correctness issues
'atanh': [torch.float32],
# Random output
'exponential': [torch.float16, torch.float32],
# CPU errors
# derivative for aten::nextafter is not implemented on CPU
'nextafter': None,
# derivative for aten::floor_divide is not implemented on CPU
'floor_divide': [torch.float16, torch.float32],
# derivative for aten::narrow_copy is not implemented on CPU
'narrow_copy': [torch.float16, torch.float32],
# derivative for aten::_histogramdd_from_bin_cts is not implemented on CPU
'histogramdd': [torch.float16, torch.float32],
# derivative for aten::histogram is not implemented
'histogram': [torch.float16, torch.float32],
# 'bool' object is not iterable
'allclose': [torch.float16, torch.float32],
'equal': [torch.float16, torch.float32],
# 'float' object is not iterable
'item': [torch.float16, torch.float32],
# "mse_backward_cpu_out" not implemented for 'Half'
'nn.functional.mse_loss': [torch.float16],
# "smooth_l1_backward_cpu_out" not implemented for 'Half'
'nn.functional.smooth_l1_loss': [torch.float16],
# cpu error: grad requires non-empty inputs
'randn': [torch.float16, torch.float32],
'signal.windows.bartlett': [torch.float32],
'signal.windows.blackman': [torch.float32],
'signal.windows.cosine': [torch.float32],
'signal.windows.exponential': [torch.float32],
'signal.windows.gaussian': [torch.float32],
'signal.windows.general_cosine': [torch.float32],
'signal.windows.general_hamming': [torch.float32],
'signal.windows.hamming': [torch.float32],
'signal.windows.hann': [torch.float32],
'signal.windows.kaiser': [torch.float32],
'signal.windows.nuttall': [torch.float32],
'eye': [torch.float16, torch.float32],
# trunc_tensor not working properly for float16
'divtrunc_rounding': [torch.float16],
'fmod': [torch.float16],
# round not working properly for float16
'round': [torch.float16],
# atomic operation in backward pass
'_unsafe_masked_index': [torch.float16],
'_unsafe_masked_index_put_accumulate': [torch.float16],
}
MACOS_12_3_XFAILLIST_GRAD = {
# Unsupported Border padding mode, forward pass success as fallback to cpu
'grid_sampler_2d': [torch.float32],
# Unimplemented
'logaddexp2': [torch.float32],
}
MACOS_BEFORE_13_3_XFAILLIST_GRAD = {
# Failures due to precision issues (due to fast-math). These has been fixed in MacOS 13.3+
'masked.softmin': [torch.float32, torch.float16],
'masked.softmax': [torch.float32, torch.float16],
'masked.log_softmax': [torch.float32, torch.float16],
# Unsupported Border padding mode, forward pass success as fallback to cpu
'grid_sampler_2d': [torch.float32],
# Same issue as `argsort` and `sort` with duplicate elements (undefined behaviour).
# Forward pass is passing since `msort` doesn't return the indices, just the values, which match the CPU.
# On the backward pass for `sort` both are used (values and indices), thus resulting in a issmatch between CPU and MPS.
# Running `msort` with stable `sort` passes.
'msort': [torch.float16],
}
SKIPLIST_GRAD = {
'nn.functional.pairwise_distance': [torch.float16],
# failed assertion `destination datatype must be fp32'
'nn.functional.conv1d': [torch.float16],
'nn.functional.conv2d': [torch.float16],
'nn.functional.conv3d': [torch.float16],
'nn.functional.conv_transpose1d': [torch.float16],
'nn.functional.conv_transpose2d': [torch.float16],
'nn.functional.conv_transpose3d': [torch.float16],
}
MACOS_13_3_XFAILLIST_GRAD = {
# Same issue as `argsort` and `sort` with duplicate elements (undefined behaviour).
# Forward pass is passing since `msort` doesn't return the indices, just the values, which match the CPU.
# On the backward pass for `sort` both are used (values and indices), thus resulting in a issmatch between CPU and MPS.
# Running `msort` with stable `sort` passes.
'msort': [torch.float16],
}
ON_MPS_XFAILLIST = {
# Failures due to lack of implementation of downstream functions on MPS backend
# TODO: remove these once downstream function 'aten::_linalg_svd.U' have been implemented
'linalg.matrix_rank': None,
# Exception: Caused by sample input at index 3 on MPS
'nn.functional.conv3d': [torch.float32],
}
def addDecorator(op, d) -> None:
op.decorators = list(op.decorators) if op.decorators is not None else []
op.decorators.append(d)
for op in ops:
key = op.name + op.variant_test_name
if key in XFAILLIST_GRAD:
addDecorator(op, DecorateInfo(
unittest.expectedFailure,
dtypes=XFAILLIST_GRAD[key]))
if key in SKIPLIST_GRAD:
addDecorator(op, DecorateInfo(
unittest.skip,
dtypes=SKIPLIST_GRAD[key]))
if key in ON_MPS_XFAILLIST:
addDecorator(op, DecorateInfo(
unittest.expectedFailure,
dtypes=ON_MPS_XFAILLIST[key]))
if key in MACOS_12_3_XFAILLIST_GRAD and (not torch.backends.mps.is_macos13_or_newer()):
addDecorator(op, DecorateInfo(
unittest.expectedFailure,
dtypes=MACOS_12_3_XFAILLIST_GRAD[key]))
if key in MACOS_BEFORE_13_3_XFAILLIST_GRAD and (torch.backends.mps.is_macos13_or_newer() and product_version < 13.3):
addDecorator(op, DecorateInfo(
unittest.expectedFailure,
dtypes=MACOS_BEFORE_13_3_XFAILLIST_GRAD[key]))
if key in MACOS_13_3_XFAILLIST_GRAD and (product_version >= 13.3):
addDecorator(op, DecorateInfo(
unittest.expectedFailure,
dtypes=MACOS_13_3_XFAILLIST_GRAD[key]))
yield op
|
import io
import platform
import sys
import math
import random
import unittest
import warnings
import subprocess
import tempfile
import os
import copy
import gc
import threading
import torch
import torch.nn as nn
import torch.nn.functional as F
import itertools
from collections import defaultdict
from torch import inf
from torch.nn import Buffer, Parameter
from torch.testing._internal import opinfo
from torch.testing._internal.common_utils import \
(gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI,
NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import get_all_dtypes, integral_types
import torch.backends.mps
from torch.distributions import Uniform, Exponential
from functools import partial
from torch.testing._internal.common_methods_invocations import (
op_db,
DecorateInfo,
UnaryUfuncInfo,
ReductionOpInfo,
SpectralFuncInfo,
BinaryUfuncInfo,
)
from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
import numpy as np
import torch
import torch.utils._pytree as pytree
from itertools import product
import operator
test_consistency_op_db = copy.deepcopy(op_db)
test_error_inputs_op_db = copy.deepcopy(op_db)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
import numpy as np
import numpy as np
import numpy as np
import torch
from torch.utils.checkpoint import checkpoint
import numpy as np
from torch.serialization import SourceChangeWarning
from torch.serialization import SourceChangeWarning
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_mps.py
|
mps_ops_modifier
|
def mps_ops_modifier(ops):
# Supported complex OPS
SUPPORTED_COMPLEX_OPS = {
'__radd__',
'__rmul__',
'__getitem__',
'abs',
'add',
'alias_copy',
'argwhere',
'atleast_1d',
'atleast_2d',
'atleast_3d',
'as_strided',
'as_strided_copy',
'as_strided_scatter',
'broadcast_tensors',
'broadcast_to',
'chalf',
'cfloat',
'chunk',
'clone',
'conj',
'conj_physical',
'contiguous',
'diag',
'diag_embed',
'diagflat',
'diagonal',
'diagonal_copy',
'diagonal_scatter',
'dsplit',
'empty',
'empty_permuted',
'empty_strided',
'eye',
'exp',
'expand',
'expand_as',
'expand_copy',
'flatten',
'fill',
'full',
'H',
'hsplit',
'imag',
'index_select',
'isfinite',
'isinf',
'isreal',
'item',
'kron',
'linalg.diagonal',
'linalg.svd',
'linspace',
'logspace',
'linspacetensor_overload',
'logspacetensor_overload',
'mH',
'mT',
'masked_scatter',
'masked_select',
'meshgridlist_of_tensors',
'meshgridvariadic_tensors',
'movedim',
'mul',
'narrow',
'narrow_copy',
'nn.functional.conv1d',
'nn.functional.conv2d',
'nn.functional.conv_transpose1d',
'nn.functional.conv_transpose2d',
'nn.functional.feature_alpha_dropoutwithout_train',
'nn.functional.padcircular',
'nn.functional.tanhshrink',
'nn.functional.unfold',
'nonzero',
'ones',
'outer',
'permute',
'positive',
'randn',
'ravel',
'real',
'repeat_interleave',
'reshape_as',
'reshape',
'resolve_conj',
'resolve_neg',
'scalar_tensor',
'select',
'sgn',
'slice',
'split',
'split_with_sizes',
'split_with_sizes_copy',
'splitlist_args',
'squeeze',
'squeezemultiple',
'sub',
'svd',
't',
't_copy',
'tanh',
'tensor_split',
'transpose',
'T',
'unbind',
'unflatten',
'unfold',
'unfold_copy',
'unsafe_chunk',
'unsafe_split',
'unsqueeze',
'unsqueeze_copy',
'view_as',
'view_as_real',
'view',
'view_copy',
'vsplit',
'zero_',
'zeros',
}
AFTER_MACOS_14_0_SUPPORTED_COMPLEX_OPS = {
'__rdiv__',
'__rmatmul__',
'_chunk_cat',
'_unsafe_masked_index',
'acos',
'acosh',
'all',
'allclose',
'any',
'addcdiv',
'addcmul',
'addmmdecomposed',
'addmv',
'asin',
'atan',
'atanh',
'bfloat16',
'bmm',
'bool',
'cartesian_prod',
'cat',
'char',
'column_stack',
'combinations',
'corrcoef',
'constant_pad_nd',
'cos',
'cosh',
'count_nonzero',
'diff',
'div',
'divno_rounding_mode',
'dot',
'dstack',
'einsum',
'eq',
'equal',
'exp2',
'expm1',
'fft.fft',
'fft.fft2',
'fft.fftn',
'fft.fftshift',
'fft.ifft',
'fft.ifft2',
'fft.ifftn',
'fft.ifftshift',
'fft.irfftn',
'fft.irfft2',
'fft.irfft',
'fft.hfftn',
'fft.hfft2',
'fft.hfft',
'flip',
'fliplr',
'flipud',
'float',
'gradient',
'half',
'hstack',
'inner',
'int',
'isclose',
'isnan',
'ldexp',
'linalg.multi_dot',
'linalg.pinv',
'log10',
'log1p',
'log2',
'log',
'logical_and',
'logical_not',
'logical_or',
'logical_xor',
'logsumexp',
'long',
'masked_fill',
'masked.mean',
'masked.prod',
'masked.std',
'masked.sum',
'masked.var',
'masked.logsumexp',
'matmul',
'mean',
'mm',
'mv',
'ne',
'neg',
'nn.functional.padconstant',
'nn.functional.padreflect',
'nn.functional.padreplicate',
'nn.functional.pixel_shuffle',
'nn.functional.pixel_unshuffle',
'nn.functional.rms_norm',
'nn.functional.softsign',
'pinverse',
'prod',
'reciprocal',
'roll',
'rot90',
'rsqrt',
'short',
'sigmoid',
'sin',
'sinh',
'sqrt',
'square',
'stack',
'stft',
'sum',
'sum_to_size',
'tan',
'tensordot',
'trace',
'trapz',
'trapezoid',
'tril',
'triu',
'true_divide',
'vstack',
'where',
'byte',
}
# Those ops worked on MacOS12, but broken on MacOS13, see https://github.com/pytorch/pytorch/issues/85758
MACOS_12_3_XFAILLIST = {
# Top 60
# expected failures
# The result of pow(9 , 8) is showing 43046716, whereas it should've been 43046721.
# fixed in macOS 13.3. Currently error is not raised.
'pow': [torch.int16, torch.int64, torch.uint8, torch.int8],
# expected failures
'__rpow__': [torch.uint8, torch.int8],
# Failures due to precision issues (due to fast-math). These has been fixed in MacOS 13.3+
'cdist': [torch.float32],
'tan': [torch.uint8, torch.float32],
# Data type support starts from macOS 13
'nn.functional.avg_pool1d': [torch.int64],
'nn.functional.avg_pool2d': [torch.int64],
'nn.functional.local_response_norm': [torch.int64],
'__radd__': [torch.uint8],
'__rdiv__': [torch.uint8],
'__rmul__': [torch.uint8],
'abs': [torch.uint8],
'acos': [torch.uint8],
'acosh': [torch.uint8],
'add': [torch.uint8],
'asin': [torch.uint8],
'asinh': [torch.uint8],
'atan': [torch.uint8],
'atanh': [torch.uint8],
'ceil': [torch.uint8],
'corrcoef': [torch.uint8],
'cos': [torch.uint8],
'cosh': [torch.uint8],
'cov': [torch.uint8],
'cumulative_trapezoid': [torch.uint8],
'deg2rad': [torch.uint8],
'diff': [torch.uint8],
'eq': [torch.uint8],
'equal': [torch.uint8],
'erf': [torch.uint8],
'exp2': [torch.uint8],
'exp': [torch.uint8],
'expm1': [torch.uint8],
'floor': [torch.uint8],
'fmax': [torch.uint8],
'fmin': [torch.uint8],
'fmod': [torch.uint8],
'ge': [torch.uint8],
'gt': [torch.uint8],
'isclose': [torch.uint8],
'isnan': [torch.uint8],
'kron': [torch.uint8],
'le': [torch.uint8],
'log10': [torch.uint8],
'log1p': [torch.uint8],
'log2': [torch.uint8],
'log': [torch.uint8],
'logical_and': [torch.uint8],
'logical_or': [torch.uint8],
'logical_xor': [torch.uint8],
'logit': [torch.uint8],
'lt': [torch.uint8],
'masked.mean': [torch.uint8],
'masked.std': [torch.uint8],
'masked.var': [torch.uint8],
'maximum': [torch.uint8],
'minimum': [torch.uint8],
'mul': [torch.uint8],
'ne': [torch.uint8],
'neg': [torch.uint8],
'nn.functional.cosine_embedding_loss': [torch.uint8],
'nn.functional.margin_ranking_loss': [torch.uint8],
'nn.functional.poisson_nll_loss': [torch.uint8],
'nn.functional.softsign': [torch.uint8],
'nn.functional.tanhshrink': [torch.uint8],
'nn.functional.triplet_margin_loss': [torch.uint8],
'nn.functional.triplet_margin_with_distance_loss': [torch.uint8],
'nn.functional.pairwise_distance': [torch.uint8],
'outer': [torch.uint8],
'rad2deg': [torch.uint8],
'reciprocal': [torch.uint8],
'remainder': [torch.uint8],
'round': [torch.uint8],
'rsqrt': [torch.uint8],
'sigmoid': [torch.uint8],
'sign': [torch.uint8],
'signbit': [torch.uint8],
'sin': [torch.uint8],
'sinh': [torch.uint8],
'special.ndtr': [torch.uint8],
'sqrt': [torch.uint8],
'sub': [torch.uint8],
'trapezoid': [torch.uint8],
'trapz': [torch.uint8],
'true_divide': [torch.uint8],
'trunc': [torch.uint8],
'xlogy': [torch.uint8],
'minbinary': [torch.uint8],
'maxbinary': [torch.uint8],
'divtrunc_rounding': [torch.uint8],
'divfloor_rounding': [torch.uint8],
'divno_rounding_mode': [torch.uint8],
'floor_divide': [torch.uint8],
'ldexp': [torch.uint8],
# square internally calls into power, and will type cast to int64, which supports starting from macOS 13
'square': [torch.bool, torch.int16, torch.int32, torch.int64, torch.uint8, torch.int8],
# cpu not giving nan for x/0.0
'atan2': [torch.bool, torch.int16, torch.int32, torch.int64, torch.uint8, torch.int8],
# inconsistency errors between cpu and mps, max seen atol is 2
'nn.functional.interpolatebilinear': [torch.uint8],
}
MACOS_BEFORE_13_3_XFAILLIST = {
# Failures due to precision issues (due to fast-math). These has been fixed in MacOS 13.3+
'tan': [torch.float32],
'cdist': [torch.float32],
# CPU Error: cpu not giving nan for x/0.0
'atan2': [torch.bool, torch.int16, torch.int32, torch.int64, torch.uint8, torch.int8],
# test blow pass on macOS 12 as it falls back to cpu
# Argsort case using duplicate indices (undefined behaviour):
# - CPU output: tensor([2546, 6917, 3181, ..., 7128, 5133, 30], devuce='cpu')
# - MPS output: tensor([2546, 6917, 3181, ..., 7128, 30, 5133], device='mps:0')
# Elements from index 30 and 5133 are both equal.
# Since CPU is not using argsort with stable=True, these cases result in undefined behaviour.
'argsort': [torch.float16, torch.int8, torch.uint8, torch.bool],
# Same issue as `argsort` with duplicate indices. This test checks both the sorted values and the indices.
# The values of the sorted tensor match the CPU, but in case of the returned indices this results in undefined behaviour.
'sort': [torch.int8, torch.uint8, torch.bool, torch.float16],
# Unsupported dtypes
'cumsum': [torch.int64],
'cumprod': [torch.int64],
'cumulative_trapezoid': [torch.int64],
'masked.cumsum': [torch.int64],
'masked.cumprod': [torch.int64],
'linalg.vander': [torch.int64],
}
MACOS_AFTER_13_1_XFAILLIST = {
# before macOS 13.2 it falls back to cpu and pass the forward pass
'grid_sampler_2d': [torch.float32], # Unsupported Border padding mode
# inconsistency errors between cpu and mps, max seen atol is 2
'nn.functional.interpolatebilinear': [torch.uint8],
}
MACOS_13_3_XFAILLIST = {
# Failure due to precision issue for fp16
# on both cpu and mps there are test cases that might produce inf result
# 'nn.functional.pairwise_distance': [torch.float16],
# test blow pass on macOS 12 as it falls back to cpu
# Argsort case using duplicate indices (undefined behaviour):
# - CPU output: tensor([2546, 6917, 3181, ..., 7128, 5133, 30], devuce='cpu')
# - MPS output: tensor([2546, 6917, 3181, ..., 7128, 30, 5133], device='mps:0')
# Elements from index 30 and 5133 are both equal.
# Since CPU is not using argsort with stable=True, these cases result in undefined behaviour.
'argsort': [torch.float16, torch.int8, torch.uint8, torch.bool],
# Same issue as `argsort` with duplicate indices. This test checks both the sorted values and the indices.
# The values of the sorted tensor match the CPU, but in case of the returned indices this results in undefined behaviour.
'sort': [torch.int8, torch.uint8, torch.bool, torch.float16],
}
MACOS_BEFORE_14_4_XFAILLIST = {
# These ops work fine in 14.4 but fail in 14.2 or 13.x
'fft.hfft2': [torch.complex64],
}
# Those ops are not expected to work
UNIMPLEMENTED_XFAILLIST = {
# Failures due to lack of op implementation on MPS backend
'login': None,
'linalg.eig': None,
'linalg.eigvals': None,
'put': None,
'nn.functional.conv_transpose3d': None,
'rounddecimals_neg_3': None,
'rounddecimals_3': None,
'rounddecimals_0': None,
'__rsub__': None,
'angle': None,
'cauchy_': None,
'cauchy': None,
'cholesky': None,
'cholesky_inverse': None,
'cholesky_solve': None,
'cummax': None,
'cummin': None,
'erfc': None,
'frexp': None,
'gcd': None,
'geqrf': None,
'nn.functional.grid_sample': None, # Unsupported Border padding mode
'heaviside': None,
'i0': None,
'igamma': None,
'igammac': None,
'index_copy': None,
'index_reduceprod': None,
'index_reducemean': None,
'index_reduceamax': None,
'index_reduceamin': None,
'isneginf': None,
'isposinf': None,
'kthvalue': None,
'lcm': None,
'linalg.cholesky': None,
'linalg.cholesky_ex': None,
'linalg.cond': None,
'linalg.detsingular': None,
'linalg.det': None,
'linalg.eigh': None,
'linalg.eigvalsh': None,
'linalg.householder_product': None,
'linalg.ldl_factor': None,
'linalg.ldl_factor_ex': None,
'linalg.ldl_solve': None,
'linalg.lstsq': None,
'linalg.lstsqgrad_oriented': None,
'linalg.lu': None,
'linalg.lu_factor_ex': None,
'linalg.lu_solve': None,
'linalg.matrix_norm': [torch.float32],
'linalg.norm': [torch.float32],
'linalg.normsubgradients_at_zero': [torch.float32],
'linalg.qr': None,
'linalg.slogdet': None,
'linalg.solve': None,
'linalg.solve_ex': None,
'linalg.svdvals': None,
'linalg.tensorsolve': None,
'linalg.vecdot': None,
'logcumsumexp': None,
'logdet': None,
'lu': None,
'lu_solve': None,
'lu_unpack': None,
'masked.median': None,
'matrix_exp': None,
'mode': None,
'nanmedian': None,
'native_dropout_backward': None,
'normnuc': None,
'nn.functional.fractional_max_pool2d': None,
'nn.functional.fractional_max_pool3d': None,
'nn.functional.adaptive_avg_pool3d': None,
'nn.functional.adaptive_max_pool3d': None,
'nn.functional.interpolatearea': None,
'nn.functional.interpolatebicubic': None,
'nn.functional.interpolatetrilinear': None,
'nn.functional.max_unpool1dgrad': None,
'nn.functional.max_unpool2dgrad': None,
'nn.functional.max_unpool3dgrad': None,
'nn.functional.avg_pool3d': None,
'nn.functional.ctc_loss': None,
'nn.functional.embedding_bag': None,
'nn.functional.hardshrink': None,
'nn.functional.max_pool3d': None,
'nn.functional.max_unpool1d': None,
'nn.functional.max_unpool2d': None,
'nn.functional.max_unpool3d': None,
'nn.functional.multi_margin_loss': None,
'nn.functional.multilabel_margin_loss': None,
'nn.functional.pdist': None,
'nn.functional.rrelu': None,
'nn.functional.norm': None,
'ormqr': None,
'pca_lowrank': None,
'qr': None,
'rsub': None,
'scatter_reduceamax': None,
'scatter_reduceamin': None,
'scatter_reducemin': None,
'scatter_reducemean': None,
'scatter_reduceprod': None,
'scatter_reducesum': None,
'segment_reduce': None,
'_segment.reduce': None,
'segment.reduce': None,
'segment_reduce_offsets': None,
'_segment_reduce_offsets': None,
'_segment_reduce_lengths': None,
'_segment_reducelengths': None,
'_segment_reduceoffsets': None,
'sinc': None,
'sparse.mm': None,
'sparse.mmreduce': None,
'special.airy_ai': None,
'special.bessel_j0': None,
'special.bessel_j1': None,
'special.bessel_y0': None,
'special.bessel_y1': None,
'special.chebyshev_polynomial_t': None,
'special.chebyshev_polynomial_u': None,
'special.entr': None,
'special.erfcx': None,
'special.hermite_polynomial_h': None,
'special.hermite_polynomial_he': None,
'special.i0e': None,
'special.i1': None,
'special.i1e': None,
'special.laguerre_polynomial_l': None,
'special.log_ndtr': None,
'special.modified_bessel_i0': None,
'special.modified_bessel_i1': None,
'special.modified_bessel_k0': None,
'special.modified_bessel_k1': None,
'special.ndtri': None,
'special.scaled_modified_bessel_k0': None,
'special.scaled_modified_bessel_k1': None,
'special.spherical_bessel_j0': None,
'special.xlog1py': None,
'special.zeta': None,
'svd_lowrank': None,
'symeig': None,
'take': None,
'to': None,
'to_sparse': None,
'unique': None,
'vdot': None,
'segment_reduce_': None,
'_upsample_bilinear2d_aa': None,
'geometric' : None,
'geometric_': None,
'log_normal_': None,
'log_normal': None,
'cdouble': None,
'double': None,
'nn.functional.softminwith_dtype': None,
'log_softmaxwith_dtype': None,
'softmaxwith_dtype': None,
'float_power': None,
'full_like': None,
'linalg.matrix_rankhermitian': None,
'linalg.pinvhermitian': None,
'nonzero_static': None,
# MPS: input sizes must be divisible by output sizes
'nn.functional.adaptive_avg_pool1d': None,
'nn.functional.adaptive_avg_pool2d': None,
# Unsupported dtypes
# bmm is not supported for integral types
'nn.functional.bilinear': [torch.int16, torch.int32, torch.int64, torch.uint8, torch.int8],
'ones_like': None,
'zeros_like': None,
# Convolution for integral types is not supported on MPS
'nn.functional.conv1d': [torch.int64],
'nn.functional.conv2d': [torch.int64],
'nn.functional.conv3d': [torch.int64],
'nn.functional.conv_transpose1d': [torch.int64],
'nn.functional.conv_transpose2d': [torch.int64],
# Unsupported dtypes
'dot': [torch.int64],
'histc': [torch.float16],
'index_add': [torch.int64],
'log1p': [torch.int64],
'sigmoid': [torch.int64],
'atan2': [torch.int64],
# GEMM on MPS is not supported for integral types
'nn.functional.linear': [torch.int16, torch.int32, torch.int64, torch.uint8, torch.int8],
'__rmatmul__': [torch.int16, torch.int32, torch.int64, torch.uint8, torch.int8],
'addmmdecomposed': [torch.int16, torch.int32, torch.int64, torch.uint8, torch.int8],
'addbmm': [torch.int16, torch.int32, torch.int64, torch.uint8, torch.int8],
'addmm': [torch.int16, torch.int32, torch.int64, torch.uint8, torch.int8],
'addmv': [torch.int16, torch.int32, torch.int64, torch.uint8, torch.int8],
'baddbmm': [torch.int16, torch.int32, torch.int64, torch.uint8, torch.int8],
'mm': [torch.int16, torch.int32, torch.int64, torch.uint8, torch.int8],
'bmm': [torch.int16, torch.int32, torch.int64, torch.uint8, torch.int8],
'einsum': [torch.int16, torch.int32, torch.int64, torch.uint8, torch.int8],
'inner': [torch.int16, torch.int32, torch.int64, torch.uint8, torch.int8],
'linalg.multi_dot': [torch.int16, torch.int32, torch.int64, torch.uint8, torch.int8],
'matmul': [torch.int16, torch.int32, torch.int64, torch.uint8, torch.int8],
'mat': [torch.int16, torch.int32, torch.int64, torch.uint8, torch.int8],
'mv': [torch.int16, torch.int32, torch.int64, torch.uint8, torch.int8],
'tensordot': [torch.int16, torch.int32, torch.int64, torch.uint8, torch.int8],
'unravel_index': [torch.int32, torch.int64],
# new_zeros/new_ones: Cannot convert a MPS Tensor to float64 dtype as
# the MPS framework doesn't support float64
'new_zeros': [torch.bool, torch.float16, torch.float32, torch.int16, torch.int32, torch.int64, torch.uint8, torch.int8],
'new_ones': [torch.bool, torch.float16, torch.float32, torch.int16, torch.int32, torch.int64, torch.uint8, torch.int8],
'new_full': [torch.bool, torch.float16, torch.float32, torch.int16, torch.int32, torch.int64, torch.uint8, torch.int8],
# returned output on CPU is float64
'bincount': [torch.int16, torch.int32, torch.int64, torch.uint8, torch.int8],
# trunc_tensor not working properly for float16
'divtrunc_rounding': [torch.float16],
'fmod': [torch.float16],
# round not working properly for float16
'round': [torch.float16],
# atomic operations not supported
'_unsafe_masked_index_put_accumulate': [torch.bool, torch.int8, torch.uint8, torch.float16, torch.int16, torch.int64],
}
if product_version < 14.0:
# FFT and BFloat16 support was added in MacOS 14
UNIMPLEMENTED_XFAILLIST.update({
'bfloat16': None,
'fft.fft': None,
'fft.fft2': None,
'fft.fftn': None,
'fft.hfft': None,
'fft.hfft2': None,
'fft.hfftn': None,
'fft.ifft': None,
'fft.ifft2': None,
'fft.ifftn': None,
'fft.ihfft': None,
'fft.ihfft2': None,
'fft.ihfftn': None,
'fft.irfft': None,
'fft.irfft2': None,
'fft.irfftn': None,
'fft.rfft': None,
'fft.rfft2': None,
'fft.rfftn': None,
'stft': None,
# Error in TestConsistencyCPU.test_output_match_isin_cpu fails for integers,
# not reproducible in later OS. Added assert to op if used in < 14.0
'isin': [torch.int64, torch.int32, torch.int16, torch.uint8, torch.int8],
'nn.functional.max_pool2d': [torch.uint8],
})
if product_version < 15.0:
UNIMPLEMENTED_XFAILLIST.update({
'quantile': None,
'nanquantile': None,
})
UNDEFINED_XFAILLIST = {
# Top 60 operators
# topk fails with duplicate indices
'topk': [torch.int16, torch.int32, torch.int64, torch.uint8, torch.int8],
# Failures due to random output that they generate using
# Philox engine causing mismatch with CPU results
'multinomial': [torch.float16, torch.float32], # random results
'uniform': [torch.float16, torch.float32],
'rand_like': [torch.float16, torch.float32],
'randint_like': [torch.float16, torch.float32, torch.int16, torch.int32, torch.int64, torch.uint8, torch.int8],
'randn_like': [torch.float16, torch.float32],
'bernoulli': [torch.float16, torch.float32],
'exponential': [torch.float16, torch.float32],
'nn.functional.feature_alpha_dropoutwith_train': [torch.float16, torch.float32],
'normal': [torch.float16, torch.float32, torch.float16, torch.float32],
'normalin_place': [torch.float16, torch.float32],
'normalnumber_mean': [torch.float16, torch.float32],
'nn.functional.alpha_dropout': [torch.float16, torch.float32],
'nn.functional.dropout': [torch.float16, torch.float32],
'nn.functional.dropout2d': [torch.float16, torch.float32],
'nn.functional.dropout3d': [torch.float16, torch.float32],
# See https://github.com/pytorch/pytorch/issues/111479
'nn.functional.multi_head_attention_forward': [torch.float32, torch.float16],
# duplicate indices are used in the testcase - undefined behaviour
'index_put': None,
# zero to negative integer powers are undefined
'__rpow__': [torch.int8, torch.int16, torch.int32, torch.int64],
'resize_': [torch.float16, torch.float32],
'resize_as_': [torch.float16, torch.float32],
# CPU Errors:
'addr': [torch.bool, torch.int16, torch.int32,
torch.int64, torch.uint8, torch.int8], # "addmv_impl_cpu" not implemented for 'Half'
'as_stridedpartial_views': [torch.bool, torch.float16, torch.float32, torch.int16,
torch.int32, torch.int64, torch.uint8, torch.int8], # cpu result off, showing random values
'as_strided_partial_views': [torch.bool, torch.float16, torch.float32, torch.int16,
torch.int32, torch.int64, torch.uint8, torch.int8], # cpu result off, showing random values
# random results
# mps vs cpu:
# Mismatched elements: 40 / 96 (41.7%)
# Greatest absolute difference: 17.892311096191406 at index (1, 0, 2) (up to 1e-05 allowed)
# Greatest relative difference: inf at index (1, 0, 0) (up to 1.3e-06 allowed)
# cuda(2.0.0.dev20230301+cu117) vs cpu:
# Mismatched elements: 56 / 96 (58.3%)
# Greatest absolute difference: 17.892311096191406 at index (1, 0, 2) (up to 1e-05 allowed)
# Greatest relative difference: inf at index (1, 0, 0) (up to 1.3e-06 allowed)
'nn.functional.scaled_dot_product_attention': [torch.float32, torch.float16],
# float output for float16 input on MPS
'logit': [torch.float16],
}
ON_MPS_XFAILLIST = {
# Failures due to lack of implementation of downstream functions on MPS backend
# TODO: remove these once downstream function 'aten::_linalg_svd.U' have been implemented
'linalg.matrix_rank': None,
}
EMPTY_OPS_SKIPLIST = {
# Fill tensors with uninitialized data, causing mismatch with CPU.
# They occasionally match, thus skipping them.
# See https://github.com/pytorch/pytorch/issues/100175
'new_empty': [torch.bool, torch.float16, torch.float32, torch.int16, torch.int32, torch.int64, torch.uint8, torch.int8],
'new_empty_strided': [torch.bool, torch.float16, torch.float32, torch.int16,
torch.int32, torch.int64, torch.uint8, torch.int8],
'empty_strided': [torch.bool, torch.float16, torch.float32, torch.int16, torch.int32, torch.int64, torch.uint8, torch.int8],
# CPU: empty is returning all 0's and there is a mismatch with MPS
# allocation (MacOS 13). According to
# https://pytorch.org/docs/2.0/generated/torch.empty.html
'empty': [torch.bool, torch.float16, torch.float32, torch.int16,
torch.int32, torch.int64, torch.uint8, torch.int8],
'empty_like': [torch.bool, torch.float16, torch.float32, torch.int16, torch.int32, torch.int64, torch.uint8, torch.int8],
'empty_permuted': [torch.bool, torch.float16, torch.float32, torch.int16,
torch.int32, torch.int64, torch.uint8, torch.int8],
}
SKIPLIST = {
# Unsupported
# input types 'tensor<1x3x9x9xf16>' and 'tensor<1xf32>' are not broadcast compatible
'nn.functional.avg_pool2d': [torch.float16],
# This doesn't work on M1, but is partially working on M2 with the exception of torch.float16
'nn.functional.conv3d': None,
}
def addDecorator(op, d) -> None:
op.decorators = list(op.decorators) if op.decorators is not None else []
op.decorators.append(d)
for op in ops:
key = op.name + op.variant_test_name
if key in EMPTY_OPS_SKIPLIST:
addDecorator(op, DecorateInfo(
unittest.skip("Skipping empty ops."),
dtypes=EMPTY_OPS_SKIPLIST[key]))
if key in SKIPLIST:
addDecorator(op, DecorateInfo(unittest.skip("Skipped!"), dtypes=SKIPLIST[key]))
for xfaillist in [UNIMPLEMENTED_XFAILLIST, UNDEFINED_XFAILLIST, ON_MPS_XFAILLIST]:
if key in xfaillist:
addDecorator(op, DecorateInfo(
unittest.expectedFailure,
dtypes=xfaillist[key]))
if key in MACOS_BEFORE_14_4_XFAILLIST and (product_version < 14.4):
addDecorator(op, DecorateInfo(
unittest.expectedFailure,
dtypes=MACOS_BEFORE_14_4_XFAILLIST[key]))
if key in MACOS_BEFORE_13_3_XFAILLIST and (torch.backends.mps.is_macos13_or_newer() and product_version < 13.3):
addDecorator(op, DecorateInfo(
unittest.expectedFailure,
dtypes=MACOS_BEFORE_13_3_XFAILLIST[key]))
if key in MACOS_AFTER_13_1_XFAILLIST and torch.backends.mps.is_macos13_or_newer(2):
addDecorator(op, DecorateInfo(
unittest.expectedFailure,
dtypes=MACOS_AFTER_13_1_XFAILLIST[key]))
if key in MACOS_13_3_XFAILLIST and (product_version >= 13.3):
addDecorator(op, DecorateInfo(
unittest.expectedFailure,
dtypes=MACOS_13_3_XFAILLIST[key]))
if key in MACOS_12_3_XFAILLIST and (not torch.backends.mps.is_macos13_or_newer()):
addDecorator(op, DecorateInfo(
unittest.expectedFailure,
dtypes=MACOS_12_3_XFAILLIST[key]))
# If ops is not supported for complex types, expect it to fail
if key not in SUPPORTED_COMPLEX_OPS and (key not in AFTER_MACOS_14_0_SUPPORTED_COMPLEX_OPS or product_version < 14.0):
addDecorator(op, DecorateInfo(unittest.expectedFailure, dtypes=[torch.complex32, torch.complex64]))
yield op
|
import io
import platform
import sys
import math
import random
import unittest
import warnings
import subprocess
import tempfile
import os
import copy
import gc
import threading
import torch
import torch.nn as nn
import torch.nn.functional as F
import itertools
from collections import defaultdict
from torch import inf
from torch.nn import Buffer, Parameter
from torch.testing._internal import opinfo
from torch.testing._internal.common_utils import \
(gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI,
NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import get_all_dtypes, integral_types
import torch.backends.mps
from torch.distributions import Uniform, Exponential
from functools import partial
from torch.testing._internal.common_methods_invocations import (
op_db,
DecorateInfo,
UnaryUfuncInfo,
ReductionOpInfo,
SpectralFuncInfo,
BinaryUfuncInfo,
)
from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
import numpy as np
import torch
import torch.utils._pytree as pytree
from itertools import product
import operator
test_consistency_op_db = copy.deepcopy(op_db)
test_error_inputs_op_db = copy.deepcopy(op_db)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
import numpy as np
import numpy as np
import numpy as np
import torch
from torch.utils.checkpoint import checkpoint
import numpy as np
from torch.serialization import SourceChangeWarning
from torch.serialization import SourceChangeWarning
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_mps.py
|
mps_ops_error_inputs_modifier
|
# Same logic as test_cuda.py
if not torch.backends.mps.is_available():
print('MPS not available, skipping tests', file=sys.stderr)
TestCase = object # noqa: F811
NNTestCase = object # noqa: F811
product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]))
# Determine whether to enable MPS memory leak check (uses same code as CUDA).
TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1'
|
def mps_ops_error_inputs_modifier(ops):
# Error input samples do not take a dtype argument.
XFAILLIST = {
# Exceptions are not raised
'__rmod__',
'__rsub__',
'__rpow__',
'bernoulli',
'clamp_max',
'clamp_min',
'masked_scatter',
# unsupported float64 dtype
'cat',
'complex',
'multinomial',
'nn.functional.conv1d',
'nn.functional.conv2d',
'nn.functional.conv3d',
'gather',
'scatter',
'scatter_add',
# unsupported complex dtypes
'masked_fill',
# MPS does not support tensor dimensions > 16
'amax',
'amin',
'aminmax',
# memory overlapping checks
'index_select',
# unimplemented
'logcumsumexp',
}
def addDecorator(op, d) -> None:
op.decorators = list(op.decorators) if op.decorators is not None else []
op.decorators.append(d)
for op in ops:
if op.error_inputs_func is None:
continue
key = op.name + op.variant_test_name
if key in XFAILLIST:
addDecorator(op, DecorateInfo(unittest.expectedFailure))
yield op
# Same logic as test_cuda.py
if not torch.backends.mps.is_available():
print('MPS not available, skipping tests', file=sys.stderr)
TestCase = NoTest # noqa: F811
NNTestCase = NoTest # noqa: F811
product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1)
total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"]))
# Determine whether to enable MPS memory leak check (uses same code as CUDA).
TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1'
|
import io
import platform
import sys
import math
import random
import unittest
import warnings
import subprocess
import tempfile
import os
import copy
import gc
import threading
import torch
import torch.nn as nn
import torch.nn.functional as F
import itertools
from collections import defaultdict
from torch import inf
from torch.nn import Buffer, Parameter
from torch.testing._internal import opinfo
from torch.testing._internal.common_utils import \
(gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI,
NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import get_all_dtypes, integral_types
import torch.backends.mps
from torch.distributions import Uniform, Exponential
from functools import partial
from torch.testing._internal.common_methods_invocations import (
op_db,
DecorateInfo,
UnaryUfuncInfo,
ReductionOpInfo,
SpectralFuncInfo,
BinaryUfuncInfo,
)
from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
import numpy as np
import torch
import torch.utils._pytree as pytree
from itertools import product
import operator
test_consistency_op_db = copy.deepcopy(op_db)
test_error_inputs_op_db = copy.deepcopy(op_db)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
import numpy as np
import numpy as np
import numpy as np
import torch
from torch.utils.checkpoint import checkpoint
import numpy as np
from torch.serialization import SourceChangeWarning
from torch.serialization import SourceChangeWarning
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/test_mps.py
|
__exit__
|
def __exit__(self, exec_type, exec_value, traceback):
# Don't check for leaks if an exception was thrown
if exec_type is not None:
return
# Compares caching allocator before/after statistics
# An increase in allocated memory is a discrepancy indicating a possible memory leak
discrepancy_detected = False
caching_allocator_mem_allocated = torch.mps.current_allocated_memory()
if caching_allocator_mem_allocated > self.caching_allocator_before:
discrepancy_detected = True
# Short-circuits if no discrepancy detected
if not discrepancy_detected:
return
# Validates the discrepancy persists after garbage collection and
# is confirmed by the driver API
gc.collect()
torch.mps.empty_cache()
discrepancy_detected = True
# Query memory multiple items to ensure leak was not transient
for n in range(3):
caching_allocator_mem_allocated = torch.mps.current_allocated_memory()
driver_mem_allocated = torch.mps.driver_allocated_memory()
caching_allocator_discrepancy = False
driver_discrepancy = False
if caching_allocator_mem_allocated > self.caching_allocator_before:
caching_allocator_discrepancy = True
if driver_mem_allocated > self.driver_before:
driver_discrepancy = True
if not(caching_allocator_discrepancy or driver_discrepancy):
# Leak was false positive, exit loop
discrepancy_detected = False
break
if caching_allocator_discrepancy and not driver_discrepancy:
# Just raises a warning if the leak is not validated by the driver API
msg = ("MPS caching allocator reports a memory leak not "
"verified by the driver API in {}! "
"Caching allocator allocated memory was {} and is now reported as {}. "
"MPS driver allocated memory was {} and is now {}.").format(
self.name, self.caching_allocator_before,
caching_allocator_mem_allocated, self.driver_before, driver_mem_allocated)
warnings.warn(msg)
elif caching_allocator_discrepancy and driver_discrepancy:
# A caching allocator discrepancy validated by the driver API is a failure
msg = ("MPS driver API confirmed a leak in {}! "
"Caching allocator allocated memory was {} and is now reported as {}. "
"MPS driver allocated memory was {} and is now {}.").format(
self.name, self.caching_allocator_before, caching_allocator_mem_allocated,
self.driver_before, driver_mem_allocated)
raise RuntimeError(msg)
# Expand TestCase class with Memory Leak Detection on MPS device
|
def __exit__(self, exec_type, exec_value, traceback):
# Don't check for leaks if an exception was thrown
if exec_type is not None:
return
# Compares caching allocator before/after statistics
# An increase in allocated memory is a discrepancy indicating a possible memory leak
discrepancy_detected = False
caching_allocator_mem_allocated = torch.mps.current_allocated_memory()
if caching_allocator_mem_allocated > self.caching_allocator_before:
discrepancy_detected = True
# Short-circuits if no discrepancy detected
if not discrepancy_detected:
return
# Validates the discrepancy persists after garbage collection and
# is confirmed by the driver API
gc.collect()
torch.mps.empty_cache()
discrepancy_detected = True
# Query memory multiple items to ensure leak was not transient
for n in range(3):
caching_allocator_mem_allocated = torch.mps.current_allocated_memory()
driver_mem_allocated = torch.mps.driver_allocated_memory()
caching_allocator_discrepancy = False
driver_discrepancy = False
if caching_allocator_mem_allocated > self.caching_allocator_before:
caching_allocator_discrepancy = True
if driver_mem_allocated > self.driver_before:
driver_discrepancy = True
if not (caching_allocator_discrepancy or driver_discrepancy):
# Leak was false positive, exit loop
discrepancy_detected = False
break
if caching_allocator_discrepancy and not driver_discrepancy:
# Just raises a warning if the leak is not validated by the driver API
msg = ("MPS caching allocator reports a memory leak not "
f"verified by the driver API in {self.name}! "
f"Caching allocator allocated memory was {self.caching_allocator_before} "
f"and is now reported as {caching_allocator_mem_allocated}. "
f"MPS driver allocated memory was {self.driver_before} and is now {driver_mem_allocated}.")
warnings.warn(msg)
elif caching_allocator_discrepancy and driver_discrepancy:
# A caching allocator discrepancy validated by the driver API is a failure
msg = (f"MPS driver API confirmed a leak in {self.name}! "
f"Caching allocator allocated memory was {self.caching_allocator_before} "
f"and is now reported as {caching_allocator_mem_allocated}. "
f"MPS driver allocated memory was {self.driver_before} and is now {driver_mem_allocated}.")
raise RuntimeError(msg)
|
import platform
import sys
import math
import random
import unittest
import warnings
import subprocess
import tempfile
import os
import pprint
import copy
import gc
import torch
import torch.nn as nn
import torch.nn.functional as F
import itertools
from collections import defaultdict
from torch import inf
from torch.nn import Parameter
from torch.testing._internal import opinfo
from torch.testing._internal.common_utils import \
(gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI,
TEST_WITH_UBSAN, dtype_abbrs, skipIfSlowGradcheckEnv, TEST_WITH_ASAN, suppress_warnings)
from torch.testing import make_tensor
from torch.testing._comparison import TensorLikePair
from torch.testing._internal.common_dtype import get_all_dtypes, integral_types
import torch.backends.mps
from torch.distributions import Uniform, Exponential
from functools import partial
from torch.testing._internal.common_methods_invocations import (
op_db,
UnaryUfuncInfo,
ReductionOpInfo,
SpectralFuncInfo,
BinaryUfuncInfo,
)
from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests, onlyMPS
from torch.testing._internal.common_nn import NNTestCase
import numpy as np
import torch
import torch.utils._pytree as pytree
from itertools import product
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]))
TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1'
class MpsMemoryLeakCheck():
import numpy as np
import numpy as np
import numpy as np
import torch
import numpy as np
from torch.serialization import SourceChangeWarning
from torch.serialization import SourceChangeWarning
|
import io
import platform
import sys
import math
import random
import unittest
import warnings
import subprocess
import tempfile
import os
import copy
import gc
import threading
import torch
import torch.nn as nn
import torch.nn.functional as F
import itertools
from collections import defaultdict
from torch import inf
from torch.nn import Buffer, Parameter
from torch.testing._internal import opinfo
from torch.testing._internal.common_utils import \
(gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI,
NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import get_all_dtypes, integral_types
import torch.backends.mps
from torch.distributions import Uniform, Exponential
from functools import partial
from torch.testing._internal.common_methods_invocations import (
op_db,
DecorateInfo,
UnaryUfuncInfo,
ReductionOpInfo,
SpectralFuncInfo,
BinaryUfuncInfo,
)
from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
import numpy as np
import torch
import torch.utils._pytree as pytree
from itertools import product
import operator
test_consistency_op_db = copy.deepcopy(op_db)
test_error_inputs_op_db = copy.deepcopy(op_db)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1)
total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"]))
TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1'
class MpsMemoryLeakCheck:
import numpy as np
import numpy as np
import numpy as np
import torch
from torch.utils.checkpoint import checkpoint
import numpy as np
from torch.serialization import SourceChangeWarning
from torch.serialization import SourceChangeWarning
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_mps.py
|
test_matmul_autocast
|
# Expand TestCase class with Memory Leak Detection on MPS device
class TestCaseMPS(TestCase):
_do_mps_memory_leak_check = True
|
def test_matmul_autocast(self):
autocast_tensor_A = torch.rand((8, 8), device="mps")
autocast_tensor_B = torch.rand((8, 8), device="mps")
tensor_A = autocast_tensor_A.clone().detach()
tensor_B = autocast_tensor_B.clone().detach()
autocast_output_tensor = torch.empty(8, 8)
output_tensor = autocast_output_tensor.clone().detach()
with torch.autocast(device_type="mps"):
autocast_output_tensor = torch.mm(autocast_tensor_A, autocast_tensor_B)
autocast_output_tensor = torch.mm(autocast_tensor_A, autocast_output_tensor)
output_tensor = torch.mm(tensor_A, tensor_B)
output_tensor = torch.mm(tensor_A, output_tensor)
self.assertEqual(autocast_output_tensor.dtype, torch.float16, "Autocast output tensor was not expected type float16")
self.assertEqual(autocast_output_tensor,
output_tensor.to(torch.float16),
f"Autocast & non-autocast tensors did not match, \
got:\n{autocast_output_tensor} \n{output_tensor.to(torch.float16)}")
# Expand TestCase class with Memory Leak Detection on MPS device
|
import io
import platform
import sys
import math
import random
import unittest
import warnings
import subprocess
import tempfile
import os
import copy
import gc
import threading
import torch
import torch.nn as nn
import torch.nn.functional as F
import itertools
from collections import defaultdict
from torch import inf
from torch.nn import Buffer, Parameter
from torch.testing._internal import opinfo
from torch.testing._internal.common_utils import \
(gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI,
NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import get_all_dtypes, integral_types
import torch.backends.mps
from torch.distributions import Uniform, Exponential
from functools import partial
from torch.testing._internal.common_methods_invocations import (
op_db,
DecorateInfo,
UnaryUfuncInfo,
ReductionOpInfo,
SpectralFuncInfo,
BinaryUfuncInfo,
)
from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
import numpy as np
import torch
import torch.utils._pytree as pytree
from itertools import product
import operator
test_consistency_op_db = copy.deepcopy(op_db)
test_error_inputs_op_db = copy.deepcopy(op_db)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1)
total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"]))
TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1'
class TestAutocastMPS(TestCase):
import numpy as np
import numpy as np
import numpy as np
import torch
from torch.utils.checkpoint import checkpoint
import numpy as np
from torch.serialization import SourceChangeWarning
from torch.serialization import SourceChangeWarning
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/test_mps.py
|
_avg_pool2d
|
def _avg_pool2d(self, x, kernel_size):
size = reduce((lambda x, y: x * y), kernel_size)
return self._sum_pool2d(x, kernel_size) / size
|
def _avg_pool2d(self, x, kernel_size):
size = reduce(operator.mul, kernel_size) # noqa: F821
return self._sum_pool2d(x, kernel_size) / size
|
import platform
import sys
import math
import random
import unittest
import warnings
import subprocess
import tempfile
import os
import pprint
import copy
import gc
import torch
import torch.nn as nn
import torch.nn.functional as F
import itertools
from collections import defaultdict
from torch import inf
from torch.nn import Parameter
from torch.testing._internal import opinfo
from torch.testing._internal.common_utils import \
(gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI,
TEST_WITH_UBSAN, dtype_abbrs, skipIfSlowGradcheckEnv, TEST_WITH_ASAN, suppress_warnings)
from torch.testing import make_tensor
from torch.testing._comparison import TensorLikePair
from torch.testing._internal.common_dtype import get_all_dtypes, integral_types
import torch.backends.mps
from torch.distributions import Uniform, Exponential
from functools import partial
from torch.testing._internal.common_methods_invocations import (
op_db,
UnaryUfuncInfo,
ReductionOpInfo,
SpectralFuncInfo,
BinaryUfuncInfo,
)
from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests, onlyMPS
from torch.testing._internal.common_nn import NNTestCase
import numpy as np
import torch
import torch.utils._pytree as pytree
from itertools import product
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]))
TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1'
class TestAvgPool(TestCaseMPS):
import numpy as np
import numpy as np
import numpy as np
import torch
import numpy as np
from torch.serialization import SourceChangeWarning
from torch.serialization import SourceChangeWarning
|
import io
import platform
import sys
import math
import random
import unittest
import warnings
import subprocess
import tempfile
import os
import copy
import gc
import threading
import torch
import torch.nn as nn
import torch.nn.functional as F
import itertools
from collections import defaultdict
from torch import inf
from torch.nn import Buffer, Parameter
from torch.testing._internal import opinfo
from torch.testing._internal.common_utils import \
(gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI,
NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import get_all_dtypes, integral_types
import torch.backends.mps
from torch.distributions import Uniform, Exponential
from functools import partial
from torch.testing._internal.common_methods_invocations import (
op_db,
DecorateInfo,
UnaryUfuncInfo,
ReductionOpInfo,
SpectralFuncInfo,
BinaryUfuncInfo,
)
from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
import numpy as np
import torch
import torch.utils._pytree as pytree
from itertools import product
import operator
test_consistency_op_db = copy.deepcopy(op_db)
test_error_inputs_op_db = copy.deepcopy(op_db)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1)
total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"]))
TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1'
class TestAvgPool(TestCaseMPS):
import numpy as np
import numpy as np
import numpy as np
import torch
from torch.utils.checkpoint import checkpoint
import numpy as np
from torch.serialization import SourceChangeWarning
from torch.serialization import SourceChangeWarning
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_mps.py
|
_avg_pool3d
|
def _avg_pool3d(self, x, kernel_size):
size = reduce((lambda x, y: x * y), kernel_size)
return self._sum_pool3d(x, kernel_size) / size
|
def _avg_pool3d(self, x, kernel_size):
size = reduce(operator.mul, kernel_size) # noqa: F821
return self._sum_pool3d(x, kernel_size) / size
|
import platform
import sys
import math
import random
import unittest
import warnings
import subprocess
import tempfile
import os
import pprint
import copy
import gc
import torch
import torch.nn as nn
import torch.nn.functional as F
import itertools
from collections import defaultdict
from torch import inf
from torch.nn import Parameter
from torch.testing._internal import opinfo
from torch.testing._internal.common_utils import \
(gradcheck, gradgradcheck, run_tests, TestCase, download_file, IS_CI,
TEST_WITH_UBSAN, dtype_abbrs, skipIfSlowGradcheckEnv, TEST_WITH_ASAN, suppress_warnings)
from torch.testing import make_tensor
from torch.testing._comparison import TensorLikePair
from torch.testing._internal.common_dtype import get_all_dtypes, integral_types
import torch.backends.mps
from torch.distributions import Uniform, Exponential
from functools import partial
from torch.testing._internal.common_methods_invocations import (
op_db,
UnaryUfuncInfo,
ReductionOpInfo,
SpectralFuncInfo,
BinaryUfuncInfo,
)
from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests, onlyMPS
from torch.testing._internal.common_nn import NNTestCase
import numpy as np
import torch
import torch.utils._pytree as pytree
from itertools import product
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]))
TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1'
class TestAvgPool(TestCaseMPS):
import numpy as np
import numpy as np
import numpy as np
import torch
import numpy as np
from torch.serialization import SourceChangeWarning
from torch.serialization import SourceChangeWarning
|
import io
import platform
import sys
import math
import random
import unittest
import warnings
import subprocess
import tempfile
import os
import copy
import gc
import threading
import torch
import torch.nn as nn
import torch.nn.functional as F
import itertools
from collections import defaultdict
from torch import inf
from torch.nn import Buffer, Parameter
from torch.testing._internal import opinfo
from torch.testing._internal.common_utils import \
(gradcheck, gradgradcheck, parametrize, run_tests, TestCase, download_file, IS_CI,
NoTest, skipIfSlowGradcheckEnv, suppress_warnings, serialTest, instantiate_parametrized_tests)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import get_all_dtypes, integral_types
import torch.backends.mps
from torch.distributions import Uniform, Exponential
from functools import partial
from torch.testing._internal.common_methods_invocations import (
op_db,
DecorateInfo,
UnaryUfuncInfo,
ReductionOpInfo,
SpectralFuncInfo,
BinaryUfuncInfo,
)
from torch.testing._internal.common_device_type import ops, dtypes, instantiate_device_type_tests, OpDTypes
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_quantization import _group_quantize_tensor, _dynamically_quantize_per_channel
import numpy as np
import torch
import torch.utils._pytree as pytree
from itertools import product
import operator
test_consistency_op_db = copy.deepcopy(op_db)
test_error_inputs_op_db = copy.deepcopy(op_db)
_ref_test_ops = tuple(
filter(
lambda op: not isinstance(
op, (UnaryUfuncInfo, ReductionOpInfo, SpectralFuncInfo, BinaryUfuncInfo)
)
and op.ref is not None,
op_db,
)
)
product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1)
total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"]))
TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1'
class TestAvgPool(TestCaseMPS):
import numpy as np
import numpy as np
import numpy as np
import torch
from torch.utils.checkpoint import checkpoint
import numpy as np
from torch.serialization import SourceChangeWarning
from torch.serialization import SourceChangeWarning
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.