library (string, 1 class) | test_file (string, 785 classes) | test_function (string, 1–295 chars) | before (string, 0–448k chars) | after (string, 0–487k chars) | context_before (string, 947 classes) | context_after (string, 0–16.3k chars) | commit_before (string, 1 class) | commit_after (string, 1 class) | change_type (string, 3 classes)
---|---|---|---|---|---|---|---|---|---
torch | test/test_torch.py | test_storage_cycle_via_dict |
def test_storage_cycle_via_dict(self):
    m1, t1 = Tracker.make()
    x = torch.UntypedStorage(2)
    x._tracker = t1
    del t1
    m2, t2 = Tracker.make()
    y = torch.UntypedStorage(2)
    y._tracker = t2
    del t2
    x._loop = y
    y._loop = x
    # C++ reference should keep the cycle live!
    # This exercises THPVariable_subtype_traverse
    # NB: Because z.grad is a reference done entirely in C++, cycles
    # involving it directly are NOT broken by Python GC; you've
    # set up a good old C++ reference cycle which we cannot safely
    # break (because C++ references are allowed to be accessed
    # multithreaded-ly) (TODO: except maybe if you can prove that
    # only Python has access to the C++ object, in which case you can
    # also prove that no multithreaded access occurs)
    z = torch.UntypedStorage(2)
    z.grad = x
    del x
    del y
    gc.collect()
    self.assertFalse(m1[0])
    self.assertFalse(m2[0])
    with disable_gc():
        del z
        self.assertFalse(m1[0])
        self.assertFalse(m2[0])
    gc.collect()
    self.assertTrue(m1[0])
    self.assertTrue(m2[0])
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
class TestTorch(TestCase):
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
| c263bd43e8e8502d4726643bc6fd046f0130ac0e | 32f585d9346e316e554c8d9bf7548af9f62141fc | added ||
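The test above leans on two helpers defined elsewhere in test_torch.py and not shown in this row: `Tracker.make()`, which returns a `(marker, tracker)` pair whose marker flips to `True` once the tracker is garbage-collected, and a `disable_gc()` context manager. A minimal sketch consistent with how the test uses them (behavior inferred from the call sites, not copied from the source):

```python
import contextlib
import gc

class Tracker:
    """Flips marker[0] to True when this object is collected."""
    def __init__(self, marker):
        self.marker = marker

    @staticmethod
    def make():
        marker = [False]
        return marker, Tracker(marker)

    def __del__(self):
        self.marker[0] = True

@contextlib.contextmanager
def disable_gc():
    # Temporarily turn the cyclic collector off, restoring it on exit.
    if gc.isenabled():
        try:
            gc.disable()
            yield
        finally:
            gc.enable()
    else:
        yield
```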
torch | test/test_transformers.py | test_scaled_dot_product_attention_math_with_negative_scale |
def test_scaled_dot_product_attention_math_with_negative_scale(self, device, kernel: SDPBackend):
    # https://github.com/pytorch/pytorch/issues/105190.
    def ref(x):
        v1 = torch.matmul(x, x.transpose(-1, -2))
        v2 = v1 / -0.0001
        v3 = v2.softmax(dim=-1)
        v4 = torch.matmul(v3, x)
        return v4

    x = torch.randn(1, 3, 64, 64, device=device)
    ref_result = ref(x)
    with sdpa_kernel(backends=[kernel]):
        sdp_math = torch.nn.functional.scaled_dot_product_attention(x, x, x, scale=-1.0 / 0.0001)
    self.assertEqual(ref_result, sdp_math)
|
import contextlib
from functools import partial
from collections import namedtuple
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import scaled_dot_product_attention
from torch.nn.attention import sdpa_kernel, SDPBackend
from torch.nn.attention.bias import CausalVariant, causal_lower_right, causal_upper_left
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
import itertools
import torch.optim as optim
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA, onlyCPU
from typing import List, Tuple, Optional, Dict
import torch.utils.cpp_extension
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
IS_FBCODE,
TEST_WITH_ROCM,
skipIfRocm,
skipIfTorchDynamo,
TEST_FAIRSEQ,
run_tests,
parametrize,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck,
make_tensor,
NOTEST_CPU,
IS_WINDOWS,
TEST_WITH_TORCHDYNAMO,
TEST_XPU,
)
from torch._dynamo.testing import CompileCounterWithBackend
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import (
IS_JETSON, SM80OrLater, PLATFORM_SUPPORTS_FLASH_ATTENTION,
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
PLATFORM_SUPPORTS_FUSED_ATTENTION,
PLATFORM_SUPPORTS_CUDNN_ATTENTION,
SM90OrLater,
tf32_on_and_off
)
from test_cpp_extensions_open_device_registration import (
remove_build_path,
generate_faked_module
)
import fairseq.models.transformer as fairseq_transformer
SdpaShape = namedtuple('Sdpa_Shape', ['batch', 'num_heads', 'seq_len', 'head_dim'])
Tolerances = namedtuple('Tolerances', ['atol', 'rtol'])
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM8XDevice = torch.cuda.is_available() and torch.cuda.get_device_capability() in [(8, 6), (8, 7), (8, 9)]
isSM90Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (9, 0)
isSM5xDevice = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] == 5
isLessThanSM80Device = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 8
PLATFORM_SPECIFIC_SDPA = get_platform_specific_sdpa()
MEM_EFF_CAPABILITY_MATCHES_SM80 = SM80OrLater or TEST_WITH_ROCM
class TestSDPA(NNTestCase):
from torch.nn.attention.bias import _calculate_scale
| c263bd43e8e8502d4726643bc6fd046f0130ac0e | 32f585d9346e316e554c8d9bf7548af9f62141fc | added ||
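The test's core claim is that the math path computes `softmax(q @ k.transpose(-1, -2) * scale) @ v`, so an explicit negative `scale` must reproduce the hand-written reference. A standalone sketch of that identity (CPU, default backend; exact tolerance behavior is an assumption):

```python
import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 64, 64)
scale = -1.0 / 0.0001

# Hand-written reference: attention with an explicit (here negative) scale.
manual = torch.softmax(torch.matmul(x, x.transpose(-1, -2)) * scale, dim=-1)
manual = torch.matmul(manual, x)

sdp = F.scaled_dot_product_attention(x, x, x, scale=scale)
torch.testing.assert_close(manual, sdp)
```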
torch | test/test_transformers.py | ref |
def ref(x):
    v1 = torch.matmul(x, x.transpose(-1, -2))
    v2 = v1 / -0.0001
    v3 = v2.softmax(dim=-1)
    v4 = torch.matmul(v3, x)
    return v4

x = torch.randn(1, 3, 64, 64, device=device)
ref_result = ref(x)
with sdpa_kernel(backends=[kernel]):
    sdp_math = torch.nn.functional.scaled_dot_product_attention(x, x, x, scale=-1.0 / 0.0001)
self.assertEqual(ref_result, sdp_math)
|
import contextlib
from functools import partial
from collections import namedtuple
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import scaled_dot_product_attention
from torch.nn.attention import sdpa_kernel, SDPBackend
from torch.nn.attention.bias import CausalVariant, causal_lower_right, causal_upper_left
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
import itertools
import torch.optim as optim
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA, onlyCPU
from typing import List, Tuple, Optional, Dict
import torch.utils.cpp_extension
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
IS_FBCODE,
TEST_WITH_ROCM,
skipIfRocm,
skipIfTorchDynamo,
TEST_FAIRSEQ,
run_tests,
parametrize,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck,
make_tensor,
NOTEST_CPU,
IS_WINDOWS,
TEST_WITH_TORCHDYNAMO,
TEST_XPU,
)
from torch._dynamo.testing import CompileCounterWithBackend
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import (
IS_JETSON, SM80OrLater, PLATFORM_SUPPORTS_FLASH_ATTENTION,
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
PLATFORM_SUPPORTS_FUSED_ATTENTION,
PLATFORM_SUPPORTS_CUDNN_ATTENTION,
SM90OrLater,
tf32_on_and_off
)
from test_cpp_extensions_open_device_registration import (
remove_build_path,
generate_faked_module
)
import fairseq.models.transformer as fairseq_transformer
SdpaShape = namedtuple('Sdpa_Shape', ['batch', 'num_heads', 'seq_len', 'head_dim'])
Tolerances = namedtuple('Tolerances', ['atol', 'rtol'])
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM8XDevice = torch.cuda.is_available() and torch.cuda.get_device_capability() in [(8, 6), (8, 7), (8, 9)]
isSM90Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (9, 0)
isSM5xDevice = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] == 5
isLessThanSM80Device = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 8
PLATFORM_SPECIFIC_SDPA = get_platform_specific_sdpa()
MEM_EFF_CAPABILITY_MATCHES_SM80 = SM80OrLater or TEST_WITH_ROCM
from torch.nn.attention.bias import _calculate_scale
| c263bd43e8e8502d4726643bc6fd046f0130ac0e | 32f585d9346e316e554c8d9bf7548af9f62141fc | added ||
torch | test/test_transformers.py | construct_local_mask |
def construct_local_mask(self, seqlen_q, seqlen_k, window_size, query_padding_mask, key_padding_mask, device):
    # row_idx = rearrange(torch.arange(seqlen_q, device=device, dtype=torch.long), "s -> s 1")
    row_idx = torch.arange(seqlen_q, device=device, dtype=torch.long).view(-1, 1)
    col_idx = torch.arange(seqlen_k, device=device, dtype=torch.long)
    sk = (
        seqlen_k
        if key_padding_mask is None
        else key_padding_mask.sum(-1).view(-1, 1, 1, 1)
        # else rearrange(key_padding_mask.sum(-1), "b -> b 1 1 1")
    )
    sq = (
        seqlen_q
        if query_padding_mask is None
        else query_padding_mask.sum(-1).view(-1, 1, 1, 1)
        # else rearrange(query_padding_mask.sum(-1), "b -> b 1 1 1")
    )
    if window_size[0] < 0:
        return col_idx > row_idx + sk - sq + window_size[1]
    else:
        sk = torch.full_like(col_idx, seqlen_k) if key_padding_mask is None else sk
        return torch.logical_or(
            col_idx > torch.minimum(row_idx + sk - sq + window_size[1], sk),
            col_idx < row_idx + sk - sq - window_size[0],
        )
|
import contextlib
from functools import partial
from collections import namedtuple
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import scaled_dot_product_attention
from torch.nn.attention import sdpa_kernel, SDPBackend
from torch.nn.attention.bias import CausalVariant, causal_lower_right, causal_upper_left
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
import itertools
import torch.optim as optim
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA, onlyCPU
from typing import List, Tuple, Optional, Dict
import torch.utils.cpp_extension
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
IS_FBCODE,
TEST_WITH_ROCM,
skipIfRocm,
skipIfTorchDynamo,
TEST_FAIRSEQ,
run_tests,
parametrize,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck,
make_tensor,
NOTEST_CPU,
IS_WINDOWS,
TEST_WITH_TORCHDYNAMO,
TEST_XPU,
)
from torch._dynamo.testing import CompileCounterWithBackend
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import (
IS_JETSON, SM80OrLater, PLATFORM_SUPPORTS_FLASH_ATTENTION,
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
PLATFORM_SUPPORTS_FUSED_ATTENTION,
PLATFORM_SUPPORTS_CUDNN_ATTENTION,
SM90OrLater,
tf32_on_and_off
)
from test_cpp_extensions_open_device_registration import (
remove_build_path,
generate_faked_module
)
import fairseq.models.transformer as fairseq_transformer
SdpaShape = namedtuple('Sdpa_Shape', ['batch', 'num_heads', 'seq_len', 'head_dim'])
Tolerances = namedtuple('Tolerances', ['atol', 'rtol'])
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM8XDevice = torch.cuda.is_available() and torch.cuda.get_device_capability() in [(8, 6), (8, 7), (8, 9)]
isSM90Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (9, 0)
isSM5xDevice = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] == 5
isLessThanSM80Device = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 8
PLATFORM_SPECIFIC_SDPA = get_platform_specific_sdpa()
MEM_EFF_CAPABILITY_MATCHES_SM80 = SM80OrLater or TEST_WITH_ROCM
class TestSDPACudaOnly(NNTestCase):
from torch.nn.attention.bias import _calculate_scale
| c263bd43e8e8502d4726643bc6fd046f0130ac0e | 32f585d9346e316e554c8d9bf7548af9f62141fc | added ||
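For intuition, here is a hypothetical reduction of the helper with `self` and the padding-mask arguments dropped; `window_size = (left, right)` bounds how far each query row may look, and `True` marks positions that are masked out:

```python
import torch

def local_mask(seqlen_q, seqlen_k, window_size):
    row_idx = torch.arange(seqlen_q, dtype=torch.long).view(-1, 1)
    col_idx = torch.arange(seqlen_k, dtype=torch.long)
    shift = seqlen_k - seqlen_q  # aligns the window to the bottom-right corner
    if window_size[0] < 0:
        return col_idx > row_idx + shift + window_size[1]
    sk = torch.full_like(col_idx, seqlen_k)
    return torch.logical_or(
        col_idx > torch.minimum(row_idx + shift + window_size[1], sk),
        col_idx < row_idx + shift - window_size[0],
    )

# window_size=(1, 0): each query attends to itself and one key to its left.
print(local_mask(4, 4, (1, 0)))
```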
torch | test/test_transformers.py | rand_nt |
def rand_nt(shape):
    batch, seq_len, num_heads, head_dim = shape
    tensors = [6 * torch.rand((seq_len, 3 * num_heads * head_dim), device="cuda", dtype=torch.float32) - 3
               for _ in range(batch)]
    return (torch.nested.nested_tensor(tensors, device="cuda", dtype=torch.float32),
            torch.nested.nested_tensor(tensors, device="cuda", dtype=torch.float16))
|
def rand_nt(shape):
    batch, seq_len, num_heads, head_dim = shape
    tensors = [6 * torch.rand((seq_len, 3 * num_heads * head_dim), device=device, dtype=torch.float32) - 3
               for _ in range(batch)]
    return (torch.nested.nested_tensor(tensors, device=device, dtype=torch.float32),
            torch.nested.nested_tensor(tensors, device=device, dtype=torch.float16))
|
import contextlib
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
from torch.backends.cuda import sdp_kernel, SDPBackend
import torch.optim as optim
from torch.testing._internal.common_dtype import floating_types_and_half
from typing import Tuple
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
TEST_FAIRSEQ,
run_tests,
parametrize,
instantiate_parametrized_tests,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck
)
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import TEST_CUDA, SM80OrLater, PLATFORM_SUPPORTS_FUSED_SDPA
import fairseq.models.transformer as fairseq_transformer
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM86Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (8, 6)
|
import contextlib
from functools import partial
from collections import namedtuple
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import scaled_dot_product_attention
from torch.nn.attention import sdpa_kernel, SDPBackend
from torch.nn.attention.bias import CausalVariant, causal_lower_right, causal_upper_left
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
import itertools
import torch.optim as optim
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA, onlyCPU
from typing import List, Tuple, Optional, Dict
import torch.utils.cpp_extension
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
IS_FBCODE,
TEST_WITH_ROCM,
skipIfRocm,
skipIfTorchDynamo,
TEST_FAIRSEQ,
run_tests,
parametrize,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck,
make_tensor,
NOTEST_CPU,
IS_WINDOWS,
TEST_WITH_TORCHDYNAMO,
TEST_XPU,
)
from torch._dynamo.testing import CompileCounterWithBackend
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import (
IS_JETSON, SM80OrLater, PLATFORM_SUPPORTS_FLASH_ATTENTION,
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
PLATFORM_SUPPORTS_FUSED_ATTENTION,
PLATFORM_SUPPORTS_CUDNN_ATTENTION,
SM90OrLater,
tf32_on_and_off
)
from test_cpp_extensions_open_device_registration import (
remove_build_path,
generate_faked_module
)
import fairseq.models.transformer as fairseq_transformer
SdpaShape = namedtuple('Sdpa_Shape', ['batch', 'num_heads', 'seq_len', 'head_dim'])
Tolerances = namedtuple('Tolerances', ['atol', 'rtol'])
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM8XDevice = torch.cuda.is_available() and torch.cuda.get_device_capability() in [(8, 6), (8, 7), (8, 9)]
isSM90Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (9, 0)
isSM5xDevice = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] == 5
isLessThanSM80Device = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 8
PLATFORM_SPECIFIC_SDPA = get_platform_specific_sdpa()
MEM_EFF_CAPABILITY_MATCHES_SM80 = SM80OrLater or TEST_WITH_ROCM
from torch.nn.attention.bias import _calculate_scale
| c263bd43e8e8502d4726643bc6fd046f0130ac0e | 32f585d9346e316e554c8d9bf7548af9f62141fc | modified |
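The only change in this row is swapping the hard-coded `device="cuda"` for the test's `device` argument. Test classes passed through `instantiate_device_type_tests` (imported in the row's new context) receive `device` as a parameter, so one body covers every registered device type; a rough sketch of that pattern, with a hypothetical class not taken from the dataset:

```python
import torch
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_nn import NNTestCase

class ExampleDeviceTests(NNTestCase):  # hypothetical, for illustration only
    def test_rand_on_device(self, device):
        x = torch.rand(2, 2, device=device)
        self.assertEqual(x.device.type, torch.device(device).type)

# Generates ExampleDeviceTestsCPU, ExampleDeviceTestsCUDA, ... in this module.
instantiate_device_type_tests(ExampleDeviceTests, globals())

if __name__ == "__main__":
    run_tests()
```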
torch | test/test_transformers.py | rand_tensor |
def rand_tensor(*shape):
    return torch.randn(shape, device=device, dtype=dtype)

# This test compares python and C++ implementations of SDP.
N, N_prime, L, S, E = 5, 2, 4, 3, 6
if input_dim == 3:
    query = rand_tensor(N, L, E)
    key = rand_tensor(N, S, E)
    value = rand_tensor(N, S, E)
elif input_dim == 4:
    query = rand_tensor(N, N_prime, L, E)
    key = rand_tensor(N, N_prime, S, E)
    value = rand_tensor(N, N_prime, S, E)
else:
    self.fail(f'Invalid input_dim {input_dim} encountered in SDP test')

attn_mask = None
if attn_mask_dim is not None:
    assert attn_mask_dim in [2, input_dim]
    mask_size = (L, S) if attn_mask_dim == 2 else ((N, L, S) if input_dim == 3 else (N, N_prime, L, S))
    attn_mask = (torch.ones(mask_size, device=device, dtype=torch.bool).tril() if is_causal
                 else torch.randint(0, 2, size=mask_size, device=device, dtype=torch.bool))

with freeze_rng_state():
    # Python impl only supports float mask and 3D inputs.
    attn_mask_float = attn_mask
    if attn_mask_float is not None:
        attn_mask_float = torch.zeros_like(attn_mask, dtype=query.dtype)
        attn_mask_float.masked_fill_(attn_mask.logical_not(), float("-inf"))
    q, k, v = query.view(-1, L, E), key.view(-1, S, E), value.view(-1, S, E)
    a = attn_mask_float
    if a is not None and attn_mask_dim > 3:
        a = a.view(-1, L, S)
    expected = sdp_ref(q, k, v, attn_mask=a, dropout_p=dropout_p)
    if input_dim > 3:
        expected = expected.view(-1, N_prime, L, E)

with freeze_rng_state():
    if is_causal:
        # NB: Don't pass attn_mask here
        actual = torch.nn.functional.scaled_dot_product_attention(
            query, key, value, None, dropout_p, is_causal)
        # Error case: both explicit attn_mask and is_causal are set
        with self.assertRaisesRegex(RuntimeError,
                                    "Explicit attn_mask should not be set when is_causal=True"):
            torch.nn.functional.scaled_dot_product_attention(
                query, key, value, attn_mask, dropout_p, is_causal)
    else:
        actual = torch.nn.functional.scaled_dot_product_attention(
            query, key, value, attn_mask, dropout_p, is_causal)

self.assertEqual(actual, expected)
|
def rand_tensor(*shape):
    return torch.randn(shape, device=device, dtype=dtype)

# This test compares python and C++ implementations of SDP.
N, N_prime, L, S, E = 5, 2, 4, 3, 6
if input_dim == 3:
    query = rand_tensor(N, L, E)
    key = rand_tensor(N, S, E)
    value = rand_tensor(N, S, E)
elif input_dim == 4:
    query = rand_tensor(N, N_prime, L, E)
    key = rand_tensor(N, N_prime, S, E)
    value = rand_tensor(N, N_prime, S, E)
else:
    self.fail(f'Invalid input_dim {input_dim} encountered in SDP test')

attn_mask = None
if attn_mask_dim is not None:
    assert attn_mask_dim in [2, input_dim]
    mask_size = (L, S) if attn_mask_dim == 2 else ((N, L, S) if input_dim == 3 else (N, N_prime, L, S))
    attn_mask = (torch.ones(mask_size, device=device, dtype=torch.bool).tril() if is_causal
                 else torch.randint(0, 2, size=mask_size, device=device, dtype=torch.bool))

with freeze_rng_state():
    # Python impl only supports float mask and 3D inputs.
    attn_mask_float = attn_mask
    if attn_mask_float is not None:
        attn_mask_float = torch.zeros_like(attn_mask, dtype=query.dtype)
        attn_mask_float.masked_fill_(attn_mask.logical_not(), float("-inf"))
    q, k, v = query.view(-1, L, E), key.view(-1, S, E), value.view(-1, S, E)
    a = attn_mask_float
    if a is not None and attn_mask_dim > 3:
        a = a.view(-1, L, S)
    expected = sdp_ref(q, k, v, attn_mask=a, dropout_p=dropout_p)
    if input_dim > 3:
        expected = expected.view(-1, N_prime, L, E)

with freeze_rng_state():
    if is_causal:
        # NB: Don't pass attn_mask here
        actual = torch.nn.functional.scaled_dot_product_attention(
            query, key, value, None, dropout_p, is_causal)
        # Error case: both explicit attn_mask and is_causal are set
        with self.assertRaisesRegex(RuntimeError,
                                    "Explicit attn_mask should not be set when is_causal=True"):
            torch.nn.functional.scaled_dot_product_attention(
                query, key, value, attn_mask, dropout_p, is_causal)
    else:
        actual = torch.nn.functional.scaled_dot_product_attention(
            query, key, value, attn_mask, dropout_p, is_causal)

# This tests the fully masked out rows case
if torch.isnan(expected).any():
    row_sums = attn_mask.sum(dim=-1)
    masked_out_rows = (row_sums == 0)
    for _ in range((input_dim - attn_mask_dim) - 1):
        masked_out_rows = masked_out_rows.unsqueeze(0)
    masked_out_rows = masked_out_rows.expand(expected.shape[:-1])
    # Slice out the fully masked rows from expected and actual
    expected_masked_out = expected[masked_out_rows]
    actual_masked_out = actual[masked_out_rows]
    expected_all_nan = torch.isnan(expected_masked_out).all()
    actual_all_zero = (actual_masked_out.abs().sum() == 0)
    self.assertTrue(expected_all_nan)
    self.assertTrue(actual_all_zero)
    return

self.assertEqual(actual, expected)
|
import contextlib
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
from torch.backends.cuda import sdp_kernel, SDPBackend
import torch.optim as optim
from torch.testing._internal.common_dtype import floating_types_and_half
from typing import Tuple
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
TEST_FAIRSEQ,
run_tests,
parametrize,
instantiate_parametrized_tests,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck
)
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import TEST_CUDA, SM80OrLater, PLATFORM_SUPPORTS_FUSED_SDPA
import fairseq.models.transformer as fairseq_transformer
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM86Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (8, 6)
class TestSDPA(NNTestCase):
|
import contextlib
from functools import partial
from collections import namedtuple
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import scaled_dot_product_attention
from torch.nn.attention import sdpa_kernel, SDPBackend
from torch.nn.attention.bias import CausalVariant, causal_lower_right, causal_upper_left
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
import itertools
import torch.optim as optim
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA, onlyCPU
from typing import List, Tuple, Optional, Dict
import torch.utils.cpp_extension
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
IS_FBCODE,
TEST_WITH_ROCM,
skipIfRocm,
skipIfTorchDynamo,
TEST_FAIRSEQ,
run_tests,
parametrize,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck,
make_tensor,
NOTEST_CPU,
IS_WINDOWS,
TEST_WITH_TORCHDYNAMO,
TEST_XPU,
)
from torch._dynamo.testing import CompileCounterWithBackend
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import (
IS_JETSON, SM80OrLater, PLATFORM_SUPPORTS_FLASH_ATTENTION,
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
PLATFORM_SUPPORTS_FUSED_ATTENTION,
PLATFORM_SUPPORTS_CUDNN_ATTENTION,
SM90OrLater,
tf32_on_and_off
)
from test_cpp_extensions_open_device_registration import (
remove_build_path,
generate_faked_module
)
import fairseq.models.transformer as fairseq_transformer
SdpaShape = namedtuple('Sdpa_Shape', ['batch', 'num_heads', 'seq_len', 'head_dim'])
Tolerances = namedtuple('Tolerances', ['atol', 'rtol'])
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM8XDevice = torch.cuda.is_available() and torch.cuda.get_device_capability() in [(8, 6), (8, 7), (8, 9)]
isSM90Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (9, 0)
isSM5xDevice = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] == 5
isLessThanSM80Device = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 8
PLATFORM_SPECIFIC_SDPA = get_platform_specific_sdpa()
MEM_EFF_CAPABILITY_MATCHES_SM80 = SM80OrLater or TEST_WITH_ROCM
from torch.nn.attention.bias import _calculate_scale
| c263bd43e8e8502d4726643bc6fd046f0130ac0e | 32f585d9346e316e554c8d9bf7548af9f62141fc | modified |
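The block appended in `after` handles attention rows whose mask admits no keys: the math reference then softmaxes a row of all `-inf` and yields NaN, while `scaled_dot_product_attention` is expected to zero those rows out. A standalone sketch of that contrast (behavior as of the row's `commit_after`; exact backend semantics are an assumption):

```python
import math
import torch
import torch.nn.functional as F

q, k, v = (torch.randn(1, 4, 8) for _ in range(3))
mask = torch.ones(4, 4, dtype=torch.bool)
mask[0] = False  # query row 0 may attend to nothing

out = F.scaled_dot_product_attention(q, k, v, attn_mask=mask)
print(out[0, 0])  # fully masked row comes back as zeros

# Naive reference: softmax over a row of -inf produces NaN.
bias = torch.zeros(4, 4).masked_fill(~mask, float("-inf"))
ref = torch.softmax((q @ k.transpose(-1, -2)) / math.sqrt(8) + bias, dim=-1) @ v
print(torch.isnan(ref[0, 0]).all())  # tensor(True)
```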
torch | test/test_transformers.py | ones_tensor |
def ones_tensor(*shape):
    return torch.ones(shape, device=device, dtype=torch.float32).to(device)

S, L, E, H = 1, 2, 4, 1
qkv = ones_tensor(S, L, E)
mha = nn.MultiheadAttention(E, H).to(device)
mha.in_proj_weight = Parameter(torch.ones((E * 3, E), device=device))
mha.out_proj.weight = Parameter(torch.ones((E, E), device=device))
expected = torch.ones(size=(S, L, E)).to(device) * 16
for kernel in kernels:
    with torch.backends.cuda.sdp_kernel(
            enable_math=(kernel == 'math'),
            enable_flash=(kernel == 'flash'),
            enable_mem_efficient=(kernel == 'meff')
    ):
        actual, _ = mha(qkv, qkv, qkv, need_weights=False, is_causal=True)
        self.assertTrue(torch.equal(actual, expected))
        if kernel != 'math':
            # fails with embedding size not multiple of 4
            with self.assertRaisesRegex(RuntimeError, "No available kernel"):
                qkv_f, mha_f = ones_tensor(S, L, 2), nn.MultiheadAttention(2, H).to(device)
                _ = mha_f(qkv_f, qkv_f, qkv_f, need_weights=False, is_causal=True)
                torch.cuda.synchronize()
|
def ones_tensor(*shape):
    return torch.ones(shape, dtype=torch.float32)

S, L, E, H = 1, 2, 4, 1
qkv = ones_tensor(S, L, E)
mha = nn.MultiheadAttention(E, H)
mha.in_proj_weight = Parameter(torch.ones((E * 3, E)))
mha.out_proj.weight = Parameter(torch.ones((E, E)))
qkv = qkv.to(float)
kpm = ones_tensor(S, L) * float("-inf")
am = ones_tensor(L, L).to(bool)
|
import contextlib
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
from torch.backends.cuda import sdp_kernel, SDPBackend
import torch.optim as optim
from torch.testing._internal.common_dtype import floating_types_and_half
from typing import Tuple
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
TEST_FAIRSEQ,
run_tests,
parametrize,
instantiate_parametrized_tests,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck
)
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import TEST_CUDA, SM80OrLater, PLATFORM_SUPPORTS_FUSED_SDPA
import fairseq.models.transformer as fairseq_transformer
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM86Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (8, 6)
|
import contextlib
from functools import partial
from collections import namedtuple
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import scaled_dot_product_attention
from torch.nn.attention import sdpa_kernel, SDPBackend
from torch.nn.attention.bias import CausalVariant, causal_lower_right, causal_upper_left
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
import itertools
import torch.optim as optim
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA, onlyCPU
from typing import List, Tuple, Optional, Dict
import torch.utils.cpp_extension
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
IS_FBCODE,
TEST_WITH_ROCM,
skipIfRocm,
skipIfTorchDynamo,
TEST_FAIRSEQ,
run_tests,
parametrize,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck,
make_tensor,
NOTEST_CPU,
IS_WINDOWS,
TEST_WITH_TORCHDYNAMO,
TEST_XPU,
)
from torch._dynamo.testing import CompileCounterWithBackend
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import (
IS_JETSON, SM80OrLater, PLATFORM_SUPPORTS_FLASH_ATTENTION,
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
PLATFORM_SUPPORTS_FUSED_ATTENTION,
PLATFORM_SUPPORTS_CUDNN_ATTENTION,
SM90OrLater,
tf32_on_and_off
)
from test_cpp_extensions_open_device_registration import (
remove_build_path,
generate_faked_module
)
import fairseq.models.transformer as fairseq_transformer
SdpaShape = namedtuple('Sdpa_Shape', ['batch', 'num_heads', 'seq_len', 'head_dim'])
Tolerances = namedtuple('Tolerances', ['atol', 'rtol'])
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM8XDevice = torch.cuda.is_available() and torch.cuda.get_device_capability() in [(8, 6), (8, 7), (8, 9)]
isSM90Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (9, 0)
isSM5xDevice = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] == 5
isLessThanSM80Device = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 8
PLATFORM_SPECIFIC_SDPA = get_platform_specific_sdpa()
MEM_EFF_CAPABILITY_MATCHES_SM80 = SM80OrLater or TEST_WITH_ROCM
from torch.nn.attention.bias import _calculate_scale
| c263bd43e8e8502d4726643bc6fd046f0130ac0e | 32f585d9346e316e554c8d9bf7548af9f62141fc | modified |
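The `before` body selects backends through the now-deprecated `torch.backends.cuda.sdp_kernel` context manager, while the row's new context imports `torch.nn.attention.sdpa_kernel`. A sketch of the equivalent selection under the newer API (the backend-list form matches its use in the other rows of this table):

```python
import torch
import torch.nn.functional as F
from torch.nn.attention import sdpa_kernel, SDPBackend

q = k = v = torch.ones(1, 2, 4, 8)
# was: with torch.backends.cuda.sdp_kernel(enable_math=True, enable_flash=False, ...):
with sdpa_kernel(backends=[SDPBackend.MATH]):
    out = F.scaled_dot_product_attention(q, k, v)
```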
torch | test/test_transformers.py | _get_mem_eff_drop_mask |
def _get_mem_eff_drop_mask(batch_size, n_heads, q_len, kv_len, p, seed, offset, device=device):
    mask = torch.empty((batch_size, n_heads, q_len, kv_len), device=device, dtype=torch.float32)
    rand_uniform = torch._fill_mem_eff_dropout_mask_(mask, p, seed, offset)
    mask = (rand_uniform > p).to(torch.float32)
    return mask

if max(seq_len_q, seq_len_k) >= 2048 and torch.cuda.get_device_properties('cuda').total_memory < 40 * 2**30:
    unittest.skip("Reference implementation OOM")
    return
if TEST_WITH_ROCM and seq_len_q * seq_len_k * head_dim * batch_size > 1024 * 1024 * 128:
    torch.cuda.empty_cache()  # Prevent memory fragmentation
if TEST_WITH_ROCM and is_causal and seq_len_q != seq_len_k:
    self.skipTest("ROCm does not accept is_causal when seq_len_q != seq_len_k")

seed = 42
scale = scale if scale is None else (1 / head_dim)
n_heads = 4
query = torch.rand(batch_size, n_heads, seq_len_q, head_dim,
                   device=device, dtype=dtype, requires_grad=True)
key = torch.rand(batch_size, n_heads, seq_len_k, head_dim, device=device,
                 dtype=dtype, requires_grad=True)
value = torch.rand(batch_size, n_heads, seq_len_k, head_dim,
                   device=device, dtype=dtype, requires_grad=True)

higher_precision_dtype = torch.float64
query_ref, key_ref, value_ref = query_key_value_clones(query, key, value, dtype=higher_precision_dtype)

# Create real output
with sdpa_kernel(backends=[SDPBackend.EFFICIENT_ATTENTION]):
    # Set the seed and run the kernel
    torch.manual_seed(seed)
    out = F.scaled_dot_product_attention(query, key, value, dropout_p=dropout_p, is_causal=is_causal, scale=scale)

if dropout_p == 0.0:
    with sdpa_kernel(backends=[SDPBackend.MATH]):
        # High Precision Math Reference
        out_ref = F.scaled_dot_product_attention(query_ref, key_ref, value_ref,
                                                 dropout_p=dropout_p, is_causal=is_causal, scale=scale)
        # Low Precision Math Reference
        out_lp_ref = F.scaled_dot_product_attention(query, key, value,
                                                    dropout_p=dropout_p, is_causal=is_causal, scale=scale)
else:
    if seq_len_q > 1024:
        self.skipTest("Will call _fill_mem_eff_dropout_mask with too many threads!")
    # Create the dropout_mask
    torch.manual_seed(seed)
    dropout_mask = _get_mem_eff_drop_mask(batch_size, n_heads, seq_len_q, seq_len_k, dropout_p, seed, 0, device=device)
    # High Precision Math Reference
    out_ref = torch.ops.aten._scaled_dot_product_attention_math(
        query_ref, key_ref, value_ref, dropout_p=dropout_p, is_causal=is_causal, scale=scale, dropout_mask=dropout_mask)[0]
    # Low Precision Math Reference
    out_lp_ref = torch.ops.aten._scaled_dot_product_attention_math(
        query, key, value, dropout_p=dropout_p, is_causal=is_causal, scale=scale,
        dropout_mask=dropout_mask)[0]

upstream_grad = torch.rand_like(out, requires_grad=False)
grads = torch.autograd.grad(out, (query, key, value), upstream_grad)
grads_ref_lp = torch.autograd.grad(out_lp_ref, (query, key, value), upstream_grad)
grads_ref = torch.autograd.grad(out_ref, (query_ref, key_ref, value_ref), upstream_grad)

fudge_factors = {
    'out': 3.0,
    'grad_query': 150.0,
    'grad_key': 25.0,
    'grad_value': 8.5,
}
if TEST_WITH_ROCM:
    fudge_factors['grad_key'] = 45.0
    fudge_factors['grad_query'] = 360.0
    if seq_len_k >= 1024:
        fudge_factors['grad_key'] = 70.0
    if seq_len_k >= 2048:
        fudge_factors['grad_key'] = 160.0
        fudge_factors['grad_query'] = 650.0
    if dtype == torch.float32:
        fudge_factors['grad_key'] = 90.0

check_out_and_grad(
    (out_ref, out_lp_ref, out),
    *zip(grads_ref, grads_ref_lp, grads),
    fudge_factors=fudge_factors,
)
|
import contextlib
from functools import partial
from collections import namedtuple
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import scaled_dot_product_attention
from torch.nn.attention import sdpa_kernel, SDPBackend
from torch.nn.attention.bias import CausalVariant, causal_lower_right, causal_upper_left
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
import itertools
import torch.optim as optim
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA, onlyCPU
from typing import List, Tuple, Optional, Dict
import torch.utils.cpp_extension
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
IS_FBCODE,
TEST_WITH_ROCM,
skipIfRocm,
skipIfTorchDynamo,
TEST_FAIRSEQ,
run_tests,
parametrize,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck,
make_tensor,
NOTEST_CPU,
IS_WINDOWS,
TEST_WITH_TORCHDYNAMO,
TEST_XPU,
)
from torch._dynamo.testing import CompileCounterWithBackend
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import (
IS_JETSON, SM80OrLater, PLATFORM_SUPPORTS_FLASH_ATTENTION,
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
PLATFORM_SUPPORTS_FUSED_ATTENTION,
PLATFORM_SUPPORTS_CUDNN_ATTENTION,
SM90OrLater,
tf32_on_and_off
)
from test_cpp_extensions_open_device_registration import (
remove_build_path,
generate_faked_module
)
import fairseq.models.transformer as fairseq_transformer
SdpaShape = namedtuple('Sdpa_Shape', ['batch', 'num_heads', 'seq_len', 'head_dim'])
Tolerances = namedtuple('Tolerances', ['atol', 'rtol'])
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM8XDevice = torch.cuda.is_available() and torch.cuda.get_device_capability() in [(8, 6), (8, 7), (8, 9)]
isSM90Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (9, 0)
isSM5xDevice = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] == 5
isLessThanSM80Device = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 8
PLATFORM_SPECIFIC_SDPA = get_platform_specific_sdpa()
MEM_EFF_CAPABILITY_MATCHES_SM80 = SM80OrLater or TEST_WITH_ROCM
from torch.nn.attention.bias import _calculate_scale
| c263bd43e8e8502d4726643bc6fd046f0130ac0e | 32f585d9346e316e554c8d9bf7548af9f62141fc | added ||
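`_get_mem_eff_drop_mask` replays the mem-efficient kernel's Philox dropout stream via the private op `torch._fill_mem_eff_dropout_mask_`, then thresholds the uniform draws into a keep-mask the math reference accepts through `dropout_mask=`. Conceptually the mask plays the role of an ordinary inverted-dropout Bernoulli mask; a hedged illustration (the 1/(1-p) rescaling convention is the standard one, assumed here):

```python
import torch

p = 0.3
torch.manual_seed(42)
# Keep an attention weight wherever a uniform draw exceeds p...
keep = (torch.rand(2, 4, 8, 8) > p).to(torch.float32)
# ...and rescale survivors so the expected value is unchanged.
weights = torch.rand(2, 4, 8, 8)
dropped = weights * keep / (1.0 - p)
print(dropped.mean(), weights.mean())  # close in expectation
```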
torch | test/test_transformers.py | get_dropout_mask |
n_heads = 4
query = torch.rand(batch_size, n_heads, seq_len_q, head_dim,
                   device="cuda", dtype=dtype, requires_grad=True)
key = torch.rand(batch_size, n_heads, seq_len_k, head_dim, device="cuda",
                 dtype=dtype, requires_grad=True)
value = torch.rand(batch_size, n_heads, seq_len_k, head_dim,
                   device="cuda", dtype=dtype, requires_grad=True)

# Run the math kernel on low precision references
query_ref_lp = query.clone().detach().requires_grad_(True)
key_ref_lp = key.clone().detach().requires_grad_(True)
value_ref_lp = value.clone().detach().requires_grad_(True)

higher_precision_dtype = torch.float64 if dtype == torch.float32 else torch.float32
query_ref = query.clone().detach().to(higher_precision_dtype).requires_grad_(True)
key_ref = key.clone().detach().to(higher_precision_dtype).requires_grad_(True)
value_ref = value.clone().detach().to(higher_precision_dtype).requires_grad_(True)

# Create real output
with sdp_kernel(enable_mem_efficient=True, enable_flash=False, enable_math=False):
    # See check_gpu_sm86_head_dim_128 in pytorch/aten/src/ATen/native/transformers/cuda/sdp_utils.h
    if isSM86Device and head_dim == 128:
        self.assertRaises(RuntimeError, lambda: F.scaled_dot_product_attention(query, key, value,
                                                                               dropout_p=dropout_p, is_causal=is_causal))
        return
|
def get_dropout_mask(output, fused_kernel, batch_size, n_heads, q_len, kv_len, dropout_p, device=device):
    if fused_kernel == SDPBackend.EFFICIENT_ATTENTION:
        output_seed, output_offset = output_tuple[2], output_tuple[3]
        output_seed = output_seed.item()
        output_offset = output_offset.item()
        return _get_mem_eff_drop_mask(batch_size, n_heads, q_len, kv_len,
                                      dropout_p, output_seed, output_offset, device=device)
    else:
        # Build dropout_mask
        dbug_mask = output_tuple[-1]
        query_padding_mask = torch.ones(
            batch_size, seq_len_q, device=device, dtype=torch.bool)
        key_padding_mask = torch.ones(
            batch_size, seq_len_k, device=device, dtype=torch.bool)
        softmax_mask = self.convert_flash_attn_S_to_softmax(
            dbug_mask, seq_len_q, seq_len_k, query_padding_mask, key_padding_mask,
            causal=is_causal)[:, :, :seq_len_q, :seq_len_k]
        dropout_mask = softmax_mask >= 0
        return dropout_mask

if fused_kernel == SDPBackend.FLASH_ATTENTION and is_causal and seq_len_q != seq_len_k:
    self.skipTest("Flash V2 does not accept is_causal when seq_len_q != seq_len_k")
if TEST_WITH_ROCM and is_causal and seq_len_q != seq_len_k:
    self.skipTest("ROCm does not accept is_causal when seq_len_q != seq_len_k")

seed = 42
n_heads = 4
query = torch.rand(batch_size, n_heads, seq_len_q, head_dim,
                   device=device, dtype=dtype, requires_grad=True)
key = torch.rand(batch_size, n_heads, seq_len_k, head_dim, device=device,
                 dtype=dtype, requires_grad=True)
value = torch.rand(batch_size, n_heads, seq_len_k, head_dim,
                   device=device, dtype=dtype, requires_grad=True)

fused_op = (torch.ops.aten._scaled_dot_product_efficient_attention
            if fused_kernel == SDPBackend.EFFICIENT_ATTENTION else torch.ops.aten._scaled_dot_product_flash_attention
            if fused_kernel == SDPBackend.FLASH_ATTENTION else torch.ops.aten._scaled_dot_product_cudnn_attention)

higher_precision_dtype = torch.float64 if dtype == torch.float32 else torch.float32
query_ref, key_ref, value_ref = query_key_value_clones(query, key, value, dtype=higher_precision_dtype)

# warmup
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
# Set the global seed before capture
torch.manual_seed(seed)
kwargs = {"dropout_p": dropout_p, "is_causal": is_causal}
if fused_kernel == SDPBackend.EFFICIENT_ATTENTION:
    kwargs["compute_log_sumexp"] = True
    kwargs["attn_bias"] = None
if fused_kernel == SDPBackend.FLASH_ATTENTION:
    kwargs['return_debug_mask'] = dropout_p > 0.0
if fused_kernel == SDPBackend.CUDNN_ATTENTION:
    kwargs["compute_log_sumexp"] = True
    kwargs["attn_bias"] = None
    if "return_debug_mask" in kwargs:
        kwargs.pop("return_debug_mask")
with torch.cuda.stream(s):
    # Create real output
    output_tuple = fused_op(query, key, value, **kwargs)
torch.cuda.current_stream().wait_stream(s)

out = output_tuple[0]
upstream_grad = torch.rand_like(out, requires_grad=False)
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
    out.backward(upstream_grad)
for x in (query, key, value):
    x.grad = None

g = torch.cuda.CUDAGraph()
# Create real output
with torch.cuda.graph(g):
    tmp = torch.rand_like(query, device=query.device)  # test non-zero intragraph offset
    # Create real output
    output_tuple = fused_op(query, key, value, **kwargs)
    assert all(not isinstance(o, torch.Tensor) or o.is_cuda for o in output_tuple)
g.replay()
out_first = output_tuple[0].clone()
g.replay()
out = output_tuple[0]
if dropout_p == 0.0:
    self.assertEqual(out_first, out, atol=0, rtol=0)
else:
    # replays produce different results
    self.assertNotEqual(out_first, out)

with sdpa_kernel(backends=[SDPBackend.MATH]):
    if dropout_p == 0.0:
        # High Precision Math Reference
        out_ref = F.scaled_dot_product_attention(query_ref, key_ref, value_ref,
                                                 dropout_p=dropout_p, is_causal=is_causal)
        # Low Precision Math Reference
        out_lp_ref = F.scaled_dot_product_attention(query, key, value,
                                                    dropout_p=dropout_p, is_causal=is_causal)
    # cuDNN attention doesn't support returning dropout mask
    elif fused_kernel != SDPBackend.CUDNN_ATTENTION:
        # Create the dropout_mask
        dropout_mask = get_dropout_mask(output_tuple, fused_kernel, batch_size,
                                        n_heads, seq_len_q, seq_len_k, dropout_p, device)
        # High Precision Math Reference
        out_ref = torch.ops.aten._scaled_dot_product_attention_math(
            query_ref, key_ref, value_ref, dropout_p=dropout_p, is_causal=is_causal,
            dropout_mask=dropout_mask)[0]
        # Low Precision Math Reference
        out_lp_ref = torch.ops.aten._scaled_dot_product_attention_math(
            query, key, value, dropout_p=dropout_p, is_causal=is_causal,
            dropout_mask=dropout_mask)[0]

g1 = torch.cuda.CUDAGraph()
with torch.cuda.graph(g1):
    grads = torch.autograd.grad(out, (query, key, value), upstream_grad)
g1.replay()
if fused_kernel != SDPBackend.CUDNN_ATTENTION or dropout_p == 0.0:
    grads_ref_lp = torch.autograd.grad(out_lp_ref, (query, key, value), upstream_grad)
    grads_ref = torch.autograd.grad(out_ref, (query_ref, key_ref, value_ref), upstream_grad)
    check_out_and_grad(
        (out_ref, out_lp_ref, out),
        *zip(grads_ref, grads_ref_lp, grads),
        fudge_factors={
            'out': 3.0,
            'grad_query': 100.0,
            'grad_key': 8.0,
            'grad_value': 3.0,
        }
    )
|
import contextlib
from functools import partial
from collections import namedtuple
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import scaled_dot_product_attention
from torch.nn.attention import sdpa_kernel, SDPBackend
from torch.nn.attention.bias import CausalVariant, causal_lower_right, causal_upper_left
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
import itertools
import torch.optim as optim
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA, onlyCPU
from typing import List, Tuple, Optional, Dict
import torch.utils.cpp_extension
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
IS_FBCODE,
TEST_WITH_ROCM,
skipIfRocm,
skipIfTorchDynamo,
TEST_FAIRSEQ,
run_tests,
parametrize,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck,
make_tensor,
NOTEST_CPU,
IS_WINDOWS,
TEST_WITH_TORCHDYNAMO,
TEST_XPU,
)
from torch._dynamo.testing import CompileCounterWithBackend
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import (
IS_JETSON, SM80OrLater, PLATFORM_SUPPORTS_FLASH_ATTENTION,
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
PLATFORM_SUPPORTS_FUSED_ATTENTION,
PLATFORM_SUPPORTS_CUDNN_ATTENTION,
SM90OrLater,
tf32_on_and_off
)
from test_cpp_extensions_open_device_registration import (
remove_build_path,
generate_faked_module
)
import fairseq.models.transformer as fairseq_transformer
SdpaShape = namedtuple('Sdpa_Shape', ['batch', 'num_heads', 'seq_len', 'head_dim'])
Tolerances = namedtuple('Tolerances', ['atol', 'rtol'])
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM8XDevice = torch.cuda.is_available() and torch.cuda.get_device_capability() in [(8, 6), (8, 7), (8, 9)]
isSM90Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (9, 0)
isSM5xDevice = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] == 5
isLessThanSM80Device = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 8
PLATFORM_SPECIFIC_SDPA = get_platform_specific_sdpa()
MEM_EFF_CAPABILITY_MATCHES_SM80 = SM80OrLater or TEST_WITH_ROCM
from torch.nn.attention.bias import _calculate_scale
| c263bd43e8e8502d4726643bc6fd046f0130ac0e | 32f585d9346e316e554c8d9bf7548af9f62141fc | added ||
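The `after` body wraps both the fused forward and its backward in `torch.cuda.CUDAGraph` captures, replaying each graph to check that dropout state advances across replays. A minimal sketch of that capture/replay pattern, stripped of the attention specifics (requires a CUDA device; buffer addresses are frozen at capture time):

```python
import torch

assert torch.cuda.is_available()
x = torch.randn(8, device="cuda")
y = torch.zeros_like(x)

# Warmup on a side stream, as capture requires.
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
    y.copy_(x * 2)
torch.cuda.current_stream().wait_stream(s)

g = torch.cuda.CUDAGraph()
with torch.cuda.graph(g):
    y.copy_(x * 2)

x.fill_(3.0)   # mutate the captured input buffer in place
g.replay()     # rerun the recorded kernels on the same addresses
assert torch.allclose(y, torch.full_like(y, 6.0))
```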
torch | test/test_transformers.py | rand_nt |
def rand_nt(shape):
    batch, seq_len, num_heads, head_dim = shape
    tensors = [6 * torch.rand((seq_len, 3 * num_heads * head_dim), device="cuda", dtype=torch.float32) - 3
               for _ in range(batch)]
    return (torch.nested.nested_tensor(tensors, device="cuda", dtype=torch.float32),
            torch.nested.nested_tensor(tensors, device="cuda", dtype=torch.float16))
|
def rand_nt(shape):
    batch, seq_len, num_heads, head_dim = shape
    tensors = [6 * torch.rand((seq_len, 3 * num_heads * head_dim), device=device, dtype=torch.float32) - 3
               for _ in range(batch)]
    return (torch.nested.nested_tensor(tensors, device=device, dtype=torch.float32),
            torch.nested.nested_tensor(tensors, device=device, dtype=torch.float16))
|
import contextlib
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
from torch.backends.cuda import sdp_kernel, SDPBackend
import torch.optim as optim
from torch.testing._internal.common_dtype import floating_types_and_half
from typing import Tuple
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
TEST_FAIRSEQ,
run_tests,
parametrize,
instantiate_parametrized_tests,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck
)
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import TEST_CUDA, SM80OrLater, PLATFORM_SUPPORTS_FUSED_SDPA
import fairseq.models.transformer as fairseq_transformer
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM86Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (8, 6)
|
import contextlib
from functools import partial
from collections import namedtuple
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import scaled_dot_product_attention
from torch.nn.attention import sdpa_kernel, SDPBackend
from torch.nn.attention.bias import CausalVariant, causal_lower_right, causal_upper_left
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
import itertools
import torch.optim as optim
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA, onlyCPU
from typing import List, Tuple, Optional, Dict
import torch.utils.cpp_extension
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
IS_FBCODE,
TEST_WITH_ROCM,
skipIfRocm,
skipIfTorchDynamo,
TEST_FAIRSEQ,
run_tests,
parametrize,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck,
make_tensor,
NOTEST_CPU,
IS_WINDOWS,
TEST_WITH_TORCHDYNAMO,
TEST_XPU,
)
from torch._dynamo.testing import CompileCounterWithBackend
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import (
IS_JETSON, SM80OrLater, PLATFORM_SUPPORTS_FLASH_ATTENTION,
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
PLATFORM_SUPPORTS_FUSED_ATTENTION,
PLATFORM_SUPPORTS_CUDNN_ATTENTION,
SM90OrLater,
tf32_on_and_off
)
from test_cpp_extensions_open_device_registration import (
remove_build_path,
generate_faked_module
)
import fairseq.models.transformer as fairseq_transformer
SdpaShape = namedtuple('Sdpa_Shape', ['batch', 'num_heads', 'seq_len', 'head_dim'])
Tolerances = namedtuple('Tolerances', ['atol', 'rtol'])
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM8XDevice = torch.cuda.is_available() and torch.cuda.get_device_capability() in [(8, 6), (8, 7), (8, 9)]
isSM90Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (9, 0)
isSM5xDevice = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] == 5
isLessThanSM80Device = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 8
PLATFORM_SPECIFIC_SDPA = get_platform_specific_sdpa()
MEM_EFF_CAPABILITY_MATCHES_SM80 = SM80OrLater or TEST_WITH_ROCM
from torch.nn.attention.bias import _calculate_scale
| c263bd43e8e8502d4726643bc6fd046f0130ac0e | 32f585d9346e316e554c8d9bf7548af9f62141fc | modified |
torch | test/test_torch.py | test_data_ptr_of_empty_tensor_with_storage |
def test_data_ptr_of_empty_tensor_with_storage(self):
    t = torch.empty((2, 2))
    self.assertNotEqual(t.data_ptr(), 0)
    t.resize_((0, 2))
    self.assertEqual(t.data_ptr(), 0)
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
class TestTorch(TestCase):
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
| c263bd43e8e8502d4726643bc6fd046f0130ac0e | 32f585d9346e316e554c8d9bf7548af9f62141fc | added ||
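A standalone restatement of the behavior this row pins down: after `resize_` to zero elements the tensor still has a storage object attached, yet `data_ptr()` reports null (these are exactly the assertions the test makes at the row's `commit_after`):

```python
import torch

t = torch.empty((2, 2))
assert t.data_ptr() != 0   # real allocation behind a 2x2 tensor
t.resize_((0, 2))
assert t.numel() == 0
assert t.data_ptr() == 0   # empty tensor reports a null data pointer
```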
torch | test/test_transformers.py | test_is_causal_equals_upper_left |
def test_is_causal_equals_upper_left(self, device, shape: List[Tuple[int]]):
    make_tensor = partial(
        torch.rand, device=device, dtype=torch.float16, requires_grad=True
    )
    bsz, num_heads, seq_len_q, seq_len_kv, head_dim = shape
    make_q_tensor = partial(make_tensor, SdpaShape(bsz, num_heads, seq_len_q, head_dim))
    make_kv_tensor = partial(make_tensor, SdpaShape(bsz, num_heads, seq_len_kv, head_dim))
    forw_tol = Tolerances(1e-3, 1e-3)
    grad_tol = Tolerances(5e-3, 5e-3)

    query = make_q_tensor()
    key = make_kv_tensor()
    value = make_kv_tensor()
    attn_bias = causal_upper_left(seq_len_q, seq_len_kv)

    out_attn_bias = scaled_dot_product_attention(query, key, value, attn_mask=attn_bias, dropout_p=0.0)
    out_is_causal = scaled_dot_product_attention(query, key, value, is_causal=True, dropout_p=0.0)
    torch.testing.assert_close(out_attn_bias, out_is_causal, rtol=forw_tol.rtol, atol=forw_tol.atol)
|
import contextlib
from functools import partial
from collections import namedtuple
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import scaled_dot_product_attention
from torch.nn.attention import sdpa_kernel, SDPBackend
from torch.nn.attention.bias import CausalVariant, causal_lower_right, causal_upper_left
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
import itertools
import torch.optim as optim
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA, onlyCPU
from typing import List, Tuple, Optional, Dict
import torch.utils.cpp_extension
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
IS_FBCODE,
TEST_WITH_ROCM,
skipIfRocm,
skipIfTorchDynamo,
TEST_FAIRSEQ,
run_tests,
parametrize,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck,
make_tensor,
NOTEST_CPU,
IS_WINDOWS,
TEST_WITH_TORCHDYNAMO,
TEST_XPU,
)
from torch._dynamo.testing import CompileCounterWithBackend
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import (
IS_JETSON, SM80OrLater, PLATFORM_SUPPORTS_FLASH_ATTENTION,
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
PLATFORM_SUPPORTS_FUSED_ATTENTION,
PLATFORM_SUPPORTS_CUDNN_ATTENTION,
SM90OrLater,
tf32_on_and_off
)
from test_cpp_extensions_open_device_registration import (
remove_build_path,
generate_faked_module
)
import fairseq.models.transformer as fairseq_transformer
SdpaShape = namedtuple('Sdpa_Shape', ['batch', 'num_heads', 'seq_len', 'head_dim'])
Tolerances = namedtuple('Tolerances', ['atol', 'rtol'])
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM8XDevice = torch.cuda.is_available() and torch.cuda.get_device_capability() in [(8, 6), (8, 7), (8, 9)]
isSM90Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (9, 0)
isSM5xDevice = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] == 5
isLessThanSM80Device = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 8
PLATFORM_SPECIFIC_SDPA = get_platform_specific_sdpa()
MEM_EFF_CAPABILITY_MATCHES_SM80 = SM80OrLater or TEST_WITH_ROCM
from torch.nn.attention.bias import _calculate_scale
class TestAttnBias(NNTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
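A minimal sketch of the equivalence the record above tests (illustrative, not part of the record): for the square case, the "upper left" causal bias behaves like an explicit lower-triangular boolean mask, so attn_mask and is_causal=True produce the same output.
import torch
import torch.nn.functional as F
q = torch.randn(1, 1, 4, 8)
k = torch.randn(1, 1, 4, 8)
v = torch.randn(1, 1, 4, 8)
# is_causal=True masks score (i, j) whenever j > i, i.e. a tril mask
explicit_mask = torch.tril(torch.ones(4, 4, dtype=torch.bool))
out_mask = F.scaled_dot_product_attention(q, k, v, attn_mask=explicit_mask)
out_causal = F.scaled_dot_product_attention(q, k, v, is_causal=True)
torch.testing.assert_close(out_mask, out_causal)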
torch
|
test/test_transformers.py
|
test_is_causal_and_mask_fails
|
def test_is_causal_and_mask_fails(self, device):
make_tensor = partial(
torch.rand, device=device, dtype=torch.float16, requires_grad=True
)
make_q_tensor = partial(make_tensor, SdpaShape(16, 16, 128, 16))
make_kv_tensor = partial(make_tensor, SdpaShape(16, 16, 128, 16))
query = make_q_tensor()
key = make_kv_tensor()
value = make_kv_tensor()
attn_bias = causal_upper_left(128, 128)
with self.assertRaisesRegex(ValueError, "CausalBias should not be used with causal=True"):
scaled_dot_product_attention(query, key, value, attn_mask=attn_bias, is_causal=True, dropout_p=0.0)
|
import contextlib
from functools import partial
from collections import namedtuple
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import scaled_dot_product_attention
from torch.nn.attention import sdpa_kernel, SDPBackend
from torch.nn.attention.bias import CausalVariant, causal_lower_right, causal_upper_left
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
import itertools
import torch.optim as optim
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA, onlyCPU
from typing import List, Tuple, Optional, Dict
import torch.utils.cpp_extension
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
IS_FBCODE,
TEST_WITH_ROCM,
skipIfRocm,
skipIfTorchDynamo,
TEST_FAIRSEQ,
run_tests,
parametrize,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck,
make_tensor,
NOTEST_CPU,
IS_WINDOWS,
TEST_WITH_TORCHDYNAMO,
TEST_XPU,
)
from torch._dynamo.testing import CompileCounterWithBackend
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import (
IS_JETSON, SM80OrLater, PLATFORM_SUPPORTS_FLASH_ATTENTION,
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
PLATFORM_SUPPORTS_FUSED_ATTENTION,
PLATFORM_SUPPORTS_CUDNN_ATTENTION,
SM90OrLater,
tf32_on_and_off
)
from test_cpp_extensions_open_device_registration import (
remove_build_path,
generate_faked_module
)
import fairseq.models.transformer as fairseq_transformer
SdpaShape = namedtuple('Sdpa_Shape', ['batch', 'num_heads', 'seq_len', 'head_dim'])
Tolerances = namedtuple('Tolerances', ['atol', 'rtol'])
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM8XDevice = torch.cuda.is_available() and torch.cuda.get_device_capability() in [(8, 6), (8, 7), (8, 9)]
isSM90Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (9, 0)
isSM5xDevice = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] == 5
isLessThanSM80Device = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 8
PLATFORM_SPECIFIC_SDPA = get_platform_specific_sdpa()
MEM_EFF_CAPABILITY_MATCHES_SM80 = SM80OrLater or TEST_WITH_ROCM
from torch.nn.attention.bias import _calculate_scale
class TestAttnBias(NNTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
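The failure mode exercised above, in isolation: passing a CausalBias as attn_mask while also setting is_causal=True specifies the masking twice and is rejected with a ValueError.
import torch
from torch.nn.functional import scaled_dot_product_attention
from torch.nn.attention.bias import causal_upper_left

q = k = v = torch.randn(1, 1, 8, 16)
bias = causal_upper_left(8, 8)
try:
    scaled_dot_product_attention(q, k, v, attn_mask=bias, is_causal=True)
except ValueError as e:
    print(e)  # "CausalBias should not be used with causal=True"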
torch
|
test/test_transformers.py
|
test_fused_sdp_choice_privateuseone
|
def test_fused_sdp_choice_privateuseone(self):
batch_size, seq_len, num_heads, head_dim = 4, 256, 2, 128
make_tensor = partial(torch.rand, device="cpu", dtype=torch.float16)
shape = SdpaShape(batch_size, num_heads, seq_len, head_dim)
q_cpu, k_cpu, v_cpu = make_tensor(shape), make_tensor(shape), make_tensor(shape)
q_privateuse1 = q_cpu.to("foo")
k_privateuse1 = k_cpu.to("foo")
v_privateuse1 = v_cpu.to("foo")
assert torch._fused_sdp_choice(q_privateuse1, k_privateuse1, v_privateuse1) == SDPBackend.OVERRIDEABLE.value
|
import contextlib
from functools import partial
from collections import namedtuple
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import scaled_dot_product_attention
from torch.nn.attention import sdpa_kernel, SDPBackend
from torch.nn.attention.bias import CausalVariant, causal_lower_right, causal_upper_left
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
import itertools
import torch.optim as optim
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA, onlyCPU
from typing import List, Tuple, Optional, Dict
import torch.utils.cpp_extension
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
IS_FBCODE,
TEST_WITH_ROCM,
skipIfRocm,
skipIfTorchDynamo,
TEST_FAIRSEQ,
run_tests,
parametrize,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck,
make_tensor,
NOTEST_CPU,
IS_WINDOWS,
TEST_WITH_TORCHDYNAMO,
TEST_XPU,
)
from torch._dynamo.testing import CompileCounterWithBackend
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import (
IS_JETSON, SM80OrLater, PLATFORM_SUPPORTS_FLASH_ATTENTION,
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
PLATFORM_SUPPORTS_FUSED_ATTENTION,
PLATFORM_SUPPORTS_CUDNN_ATTENTION,
SM90OrLater,
tf32_on_and_off
)
from test_cpp_extensions_open_device_registration import (
remove_build_path,
generate_faked_module
)
import fairseq.models.transformer as fairseq_transformer
SdpaShape = namedtuple('Sdpa_Shape', ['batch', 'num_heads', 'seq_len', 'head_dim'])
Tolerances = namedtuple('Tolerances', ['atol', 'rtol'])
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM8XDevice = torch.cuda.is_available() and torch.cuda.get_device_capability() in [(8, 6), (8, 7), (8, 9)]
isSM90Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (9, 0)
isSM5xDevice = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] == 5
isLessThanSM80Device = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 8
PLATFORM_SPECIFIC_SDPA = get_platform_specific_sdpa()
MEM_EFF_CAPABILITY_MATCHES_SM80 = SM80OrLater or TEST_WITH_ROCM
from torch.nn.attention.bias import _calculate_scale
@unittest.skipIf(TEST_XPU, "XPU does not support cppextension currently")
@unittest.skipIf(IS_FBCODE, "Ninja is required to load C++ extensions and it's not compatible with Buck ")
class TestSDPAPrivateUse1Only(NNTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
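For comparison with the assertion above, a small probe of the same private helper on ordinary CPU tensors; which backend integer comes back depends on the build and inputs, so nothing is asserted here.
import torch
from torch.nn.attention import SDPBackend

q = k = v = torch.rand(4, 2, 256, 128, dtype=torch.float16)
choice = torch._fused_sdp_choice(q, k, v)
print(choice, choice == SDPBackend.OVERRIDEABLE.value)  # False on stock CPU builds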
torch
|
test/test_transformers.py
|
test_scaled_dot_product_fused_attention_overrideable
|
def test_scaled_dot_product_fused_attention_overrideable(self):
batch_size, seq_len, num_heads, head_dim = 4, 256, 2, 128
make_tensor = partial(torch.rand, device="cpu", dtype=torch.float16)
shape = SdpaShape(batch_size, num_heads, seq_len, head_dim)
q_cpu, k_cpu, v_cpu = make_tensor(shape), make_tensor(shape), make_tensor(shape)
q_privateuse1 = q_cpu.to("foo")
k_privateuse1 = k_cpu.to("foo")
v_privateuse1 = v_cpu.to("foo")
actual = torch.nn.functional.scaled_dot_product_attention(
q_privateuse1, k_privateuse1, v_privateuse1, attn_mask=None, dropout_p=0.0)
|
import contextlib
from functools import partial
from collections import namedtuple
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import scaled_dot_product_attention
from torch.nn.attention import sdpa_kernel, SDPBackend
from torch.nn.attention.bias import CausalVariant, causal_lower_right, causal_upper_left
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
import itertools
import torch.optim as optim
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA, onlyCPU
from typing import List, Tuple, Optional, Dict
import torch.utils.cpp_extension
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
IS_FBCODE,
TEST_WITH_ROCM,
skipIfRocm,
skipIfTorchDynamo,
TEST_FAIRSEQ,
run_tests,
parametrize,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck,
make_tensor,
NOTEST_CPU,
IS_WINDOWS,
TEST_WITH_TORCHDYNAMO,
TEST_XPU,
)
from torch._dynamo.testing import CompileCounterWithBackend
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import (
IS_JETSON, SM80OrLater, PLATFORM_SUPPORTS_FLASH_ATTENTION,
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
PLATFORM_SUPPORTS_FUSED_ATTENTION,
PLATFORM_SUPPORTS_CUDNN_ATTENTION,
SM90OrLater,
tf32_on_and_off
)
from test_cpp_extensions_open_device_registration import (
remove_build_path,
generate_faked_module
)
import fairseq.models.transformer as fairseq_transformer
SdpaShape = namedtuple('Sdpa_Shape', ['batch', 'num_heads', 'seq_len', 'head_dim'])
Tolerances = namedtuple('Tolerances', ['atol', 'rtol'])
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM8XDevice = torch.cuda.is_available() and torch.cuda.get_device_capability() in [(8, 6), (8, 7), (8, 9)]
isSM90Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (9, 0)
isSM5xDevice = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] == 5
isLessThanSM80Device = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 8
PLATFORM_SPECIFIC_SDPA = get_platform_specific_sdpa()
MEM_EFF_CAPABILITY_MATCHES_SM80 = SM80OrLater or TEST_WITH_ROCM
from torch.nn.attention.bias import _calculate_scale
@unittest.skipIf(TEST_XPU, "XPU does not support cppextension currently")
@unittest.skipIf(IS_FBCODE, "Ninja is required to load C++ extensions and it's not compatible with Buck ")
class TestSDPAPrivateUse1Only(NNTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
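Context for the "foo" device used in these records (an assumption on this side, since the registration code lives in the cpp-extension helpers imported above): the name comes from renaming PyTorch's reserved PrivateUse1 backend, after which a compiled backend module is registered and .to("foo") works.
import torch

# rename_privateuse1_backend is a real API; the backend module itself has to
# come from a compiled extension (e.g. the generate_faked_module() helper
# imported by these tests), so its registration is left commented out here.
torch.utils.rename_privateuse1_backend("foo")
# torch._register_device_module("foo", generate_faked_module())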
torch
|
test/test_transformers.py
|
test_scaled_dot_product_fused_attention_overrideable_backward
|
if __name__ == '__main__':
run_tests()
|
def test_scaled_dot_product_fused_attention_overrideable_backward(self):
batch_size, seq_len, num_heads, head_dim = 4, 256, 2, 128
make_tensor = partial(torch.rand, device="cpu", dtype=torch.float16, requires_grad=True)
shape = (batch_size, num_heads, seq_len, head_dim)
q_cpu, k_cpu, v_cpu = make_tensor(shape), make_tensor(shape), make_tensor(shape)
attn_mask = make_tensor((batch_size, num_heads, seq_len, seq_len))
q_privateuse1 = q_cpu.to("foo")
k_privateuse1 = k_cpu.to("foo")
v_privateuse1 = v_cpu.to("foo")
attn_mask_privateuse1 = attn_mask.to("foo")
output, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, philox_seed, philox_offset, debug_attn_mask = \
torch.ops.aten._scaled_dot_product_fused_attention_overrideable(
q_privateuse1, k_privateuse1, v_privateuse1, attn_bias=attn_mask_privateuse1)
rand_upward = torch.rand(shape, device="cpu", dtype=torch.float16, requires_grad=False)
rand_upward_privateuse1 = rand_upward.to("foo")
grad_input_mask = [True, True, True, True]
grad_q, grad_k, grad_v, grad_attn_mask = torch.ops.aten._scaled_dot_product_fused_attention_overrideable_backward(
rand_upward_privateuse1, q_privateuse1, k_privateuse1, v_privateuse1, attn_mask_privateuse1,
grad_input_mask, output, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p=0.0,
is_causal=False, philox_seed=philox_seed, philox_offset=philox_offset)
|
import contextlib
from functools import partial
from collections import namedtuple
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import scaled_dot_product_attention
from torch.nn.attention import sdpa_kernel, SDPBackend
from torch.nn.attention.bias import CausalVariant, causal_lower_right, causal_upper_left
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
import itertools
import torch.optim as optim
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA, onlyCPU
from typing import List, Tuple, Optional, Dict
import torch.utils.cpp_extension
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
IS_FBCODE,
TEST_WITH_ROCM,
skipIfRocm,
skipIfTorchDynamo,
TEST_FAIRSEQ,
run_tests,
parametrize,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck,
make_tensor,
NOTEST_CPU,
IS_WINDOWS,
TEST_WITH_TORCHDYNAMO,
TEST_XPU,
)
from torch._dynamo.testing import CompileCounterWithBackend
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import (
IS_JETSON, SM80OrLater, PLATFORM_SUPPORTS_FLASH_ATTENTION,
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
PLATFORM_SUPPORTS_FUSED_ATTENTION,
PLATFORM_SUPPORTS_CUDNN_ATTENTION,
SM90OrLater,
tf32_on_and_off
)
from test_cpp_extensions_open_device_registration import (
remove_build_path,
generate_faked_module
)
import fairseq.models.transformer as fairseq_transformer
SdpaShape = namedtuple('Sdpa_Shape', ['batch', 'num_heads', 'seq_len', 'head_dim'])
Tolerances = namedtuple('Tolerances', ['atol', 'rtol'])
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM8XDevice = torch.cuda.is_available() and torch.cuda.get_device_capability() in [(8, 6), (8, 7), (8, 9)]
isSM90Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (9, 0)
isSM5xDevice = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] == 5
isLessThanSM80Device = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 8
PLATFORM_SPECIFIC_SDPA = get_platform_specific_sdpa()
MEM_EFF_CAPABILITY_MATCHES_SM80 = SM80OrLater or TEST_WITH_ROCM
from torch.nn.attention.bias import _calculate_scale
@unittest.skipIf(TEST_XPU, "XPU does not support cppextension currently")
@unittest.skipIf(IS_FBCODE, "Ninja is required to load C++ extensions and it's not compatible with Buck ")
class TestSDPAPrivateUse1Only(NNTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
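A CPU analogue of the backward check above, using the public SDPA entry point so it runs without a custom backend; it mirrors the shapes but not the private op's explicit logsumexp/philox plumbing.
import torch
import torch.nn.functional as F

q, k, v = (torch.randn(4, 2, 256, 128, requires_grad=True) for _ in range(3))
out = F.scaled_dot_product_attention(q, k, v)
out.backward(torch.ones_like(out))
print(q.grad.shape, k.grad.shape, v.grad.shape)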
torch
|
test/test_type_hints.py
|
get_all_examples
|
def get_all_examples():
"""get_all_examples() -> str
This function grabs (hopefully all) examples from the torch documentation
strings and puts them in one nonsensical module returned as a string.
"""
blocklist = {
"_np",
}
allexamples = ""
example_file_lines = [
"import torch",
"import torch.nn.functional as F",
"import math",
"import numpy",
"import io",
"import itertools",
"",
# for requires_grad_ example
# NB: We are parsing this file as Python 2, so we must use
# Python 2 type annotation syntax
"def preprocess(inp):",
" # type: (torch.Tensor) -> torch.Tensor",
" return inp",
]
for fname in dir(torch):
fn = getattr(torch, fname)
docstr = inspect.getdoc(fn)
if docstr and fname not in blocklist:
e = get_examples_from_docstring(docstr)
if e:
example_file_lines.append(f"\n\ndef example_torch_{fname}():")
example_file_lines += e
for fname in dir(torch.Tensor):
fn = getattr(torch.Tensor, fname)
docstr = inspect.getdoc(fn)
if docstr and fname not in blocklist:
e = get_examples_from_docstring(docstr)
if e:
example_file_lines.append(f"\n\ndef example_torch_tensor_{fname}():")
example_file_lines += e
return "\n".join(example_file_lines)
class TestTypeHints(TestCase):
@unittest.skipIf(not HAVE_MYPY, "need mypy")
def test_doc_examples(self):
"""
Run documentation examples through mypy.
"""
fn = Path(__file__).resolve().parent / 'generated_type_hints_smoketest.py'
with open(fn, "w") as f:
print(get_all_examples(), file=f)
# OK, so here's the deal. mypy treats installed packages
# and local modules differently: if a package is installed,
# mypy will refuse to use modules from that package for type
# checking unless the module explicitly says that it supports
# type checking. (Reference:
# https://mypy.readthedocs.io/en/latest/running_mypy.html#missing-imports
# )
#
# Now, PyTorch doesn't support typechecking, and we shouldn't
# claim that it supports typechecking (it doesn't.) However, not
# claiming we support typechecking is bad for this test, which
# wants to use the partial information we get from the bits of
# PyTorch which are typed to check if it typechecks. And
# although mypy will work directly if you are working in source,
# some of our tests involve installing PyTorch and then running
# its tests.
#
# The guidance we got from Michael Sullivan and Joshua Oreman,
# and also independently developed by Thomas Viehmann,
# is that we should create a fake directory and add symlinks for
# the packages that should typecheck. So that is what we do
# here.
#
# If you want to run mypy by hand, and you run from PyTorch
# root directory, it should work fine to skip this step (since
# mypy will preferentially pick up the local files first). The
# temporary directory here is purely needed for CI. For this
# reason, we also still drop the generated file in the test
# source folder, for ease of inspection when there are failures.
with tempfile.TemporaryDirectory() as tmp_dir:
try:
os.symlink(
os.path.dirname(torch.__file__),
os.path.join(tmp_dir, 'torch'),
target_is_directory=True
)
except OSError:
raise unittest.SkipTest('cannot symlink') from None
repo_rootdir = Path(__file__).resolve().parent.parent
# TODO: Would be better not to chdir here, this affects the
# entire process!
with set_cwd(str(repo_rootdir)):
(stdout, stderr, result) = mypy.api.run([
'--cache-dir=.mypy_cache/doc',
'--no-strict-optional', # needed because of torch.lu_unpack, see gh-36584
str(fn),
])
if result != 0:
self.fail(f"mypy failed:\n{stderr}\n{stdout}")
if __name__ == '__main__':
run_tests()
|
def get_all_examples():
"""get_all_examples() -> str
This function grabs (hopefully all) examples from the torch documentation
strings and puts them in one nonsensical module returned as a string.
"""
blocklist = {
"_np",
"_InputT",
}
allexamples = ""
example_file_lines = [
"# mypy: allow-untyped-defs",
"",
"import math",
"import io",
"import itertools",
"",
"import numpy",
"",
"import torch",
"import torch.nn.functional as F",
"",
# for requires_grad_ example
# NB: We are parsing this file as Python 2, so we must use
# Python 2 type annotation syntax
"def preprocess(inp):",
" # type: (torch.Tensor) -> torch.Tensor",
" return inp",
]
for fname in dir(torch):
fn = getattr(torch, fname)
docstr = inspect.getdoc(fn)
if docstr and fname not in blocklist:
e = get_examples_from_docstring(docstr)
if e:
example_file_lines.append(f"\n\ndef example_torch_{fname}() -> None:")
example_file_lines += e
for fname in dir(torch.Tensor):
fn = getattr(torch.Tensor, fname)
docstr = inspect.getdoc(fn)
if docstr and fname not in blocklist:
e = get_examples_from_docstring(docstr)
if e:
example_file_lines.append(
f"\n\ndef example_torch_tensor_{fname}() -> None:"
)
example_file_lines += e
return "\n".join(example_file_lines)
class TestTypeHints(TestCase):
@unittest.skipIf(not HAVE_MYPY, "need mypy")
def test_doc_examples(self):
"""
Run documentation examples through mypy.
"""
fn = Path(__file__).resolve().parent / "generated_type_hints_smoketest.py"
fn.write_text(get_all_examples())
# OK, so here's the deal. mypy treats installed packages
# and local modules differently: if a package is installed,
# mypy will refuse to use modules from that package for type
# checking unless the module explicitly says that it supports
# type checking. (Reference:
# https://mypy.readthedocs.io/en/latest/running_mypy.html#missing-imports
# )
#
# Now, PyTorch doesn't support typechecking, and we shouldn't
# claim that it supports typechecking (it doesn't.) However, not
# claiming we support typechecking is bad for this test, which
# wants to use the partial information we get from the bits of
# PyTorch which are typed to check if it typechecks. And
# although mypy will work directly if you are working in source,
# some of our tests involve installing PyTorch and then running
# its tests.
#
# The guidance we got from Michael Sullivan and Joshua Oreman,
# and also independently developed by Thomas Viehmann,
# is that we should create a fake directory and add symlinks for
# the packages that should typecheck. So that is what we do
# here.
#
# If you want to run mypy by hand, and you run from PyTorch
# root directory, it should work fine to skip this step (since
# mypy will preferentially pick up the local files first). The
# temporary directory here is purely needed for CI. For this
# reason, we also still drop the generated file in the test
# source folder, for ease of inspection when there are failures.
with tempfile.TemporaryDirectory() as tmp_dir:
try:
os.symlink(
os.path.dirname(torch.__file__),
os.path.join(tmp_dir, "torch"),
target_is_directory=True,
)
except OSError:
raise unittest.SkipTest("cannot symlink") from None
repo_rootdir = Path(__file__).resolve().parent.parent
# TODO: Would be better not to chdir here, this affects the
# entire process!
with set_cwd(str(repo_rootdir)):
(stdout, stderr, result) = mypy.api.run(
[
"--cache-dir=.mypy_cache/doc",
"--no-strict-optional", # needed because of torch.lu_unpack, see gh-36584
str(fn),
]
)
if result != 0:
self.fail(f"mypy failed:\n{stderr}\n{stdout}")
if __name__ == "__main__":
run_tests()
|
import unittest
from torch.testing._internal.common_utils import TestCase, run_tests, set_cwd
import tempfile
import torch
import doctest
import os
import inspect
from pathlib import Path
import mypy.api
|
import doctest
import inspect
import os
import tempfile
import unittest
from pathlib import Path
import torch
from torch.testing._internal.common_utils import run_tests, set_cwd, TestCase
import mypy.api
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
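get_examples_from_docstring is referenced but not shown in this record; a plausible stand-in (an assumption, not the repo helper) extracts the ">>>" examples with the stdlib doctest parser that this file already imports.
import doctest
import inspect

import torch

docstr = inspect.getdoc(torch.add) or ""
for ex in doctest.DocTestParser().get_examples(docstr):
    print(ex.source.rstrip())  # the ">>> ..." source of each example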
torch
|
test/test_type_hints.py
|
test_doc_examples
|
def test_doc_examples(self):
"""
Run documentation examples through mypy.
"""
fn = Path(__file__).resolve().parent / 'generated_type_hints_smoketest.py'
with open(fn, "w") as f:
print(get_all_examples(), file=f)
# OK, so here's the deal. mypy treats installed packages
# and local modules differently: if a package is installed,
# mypy will refuse to use modules from that package for type
# checking unless the module explicitly says that it supports
# type checking. (Reference:
# https://mypy.readthedocs.io/en/latest/running_mypy.html#missing-imports
# )
#
# Now, PyTorch doesn't support typechecking, and we shouldn't
# claim that it supports typechecking (it doesn't.) However, not
# claiming we support typechecking is bad for this test, which
# wants to use the partial information we get from the bits of
# PyTorch which are typed to check if it typechecks. And
# although mypy will work directly if you are working in source,
# some of our tests involve installing PyTorch and then running
# its tests.
#
# The guidance we got from Michael Sullivan and Joshua Oreman,
# and also independently developed by Thomas Viehmann,
# is that we should create a fake directory and add symlinks for
# the packages that should typecheck. So that is what we do
# here.
#
# If you want to run mypy by hand, and you run from PyTorch
# root directory, it should work fine to skip this step (since
# mypy will preferentially pick up the local files first). The
# temporary directory here is purely needed for CI. For this
# reason, we also still drop the generated file in the test
# source folder, for ease of inspection when there are failures.
with tempfile.TemporaryDirectory() as tmp_dir:
try:
os.symlink(
os.path.dirname(torch.__file__),
os.path.join(tmp_dir, 'torch'),
target_is_directory=True
)
except OSError:
raise unittest.SkipTest('cannot symlink') from None
repo_rootdir = Path(__file__).resolve().parent.parent
# TODO: Would be better not to chdir here, this affects the
# entire process!
with set_cwd(str(repo_rootdir)):
(stdout, stderr, result) = mypy.api.run([
'--cache-dir=.mypy_cache/doc',
'--no-strict-optional', # needed because of torch.lu_unpack, see gh-36584
str(fn),
])
if result != 0:
self.fail(f"mypy failed:\n{stderr}\n{stdout}")
|
def test_doc_examples(self):
"""
Run documentation examples through mypy.
"""
fn = Path(__file__).resolve().parent / "generated_type_hints_smoketest.py"
fn.write_text(get_all_examples())
# OK, so here's the deal. mypy treats installed packages
# and local modules differently: if a package is installed,
# mypy will refuse to use modules from that package for type
# checking unless the module explicitly says that it supports
# type checking. (Reference:
# https://mypy.readthedocs.io/en/latest/running_mypy.html#missing-imports
# )
#
# Now, PyTorch doesn't support typechecking, and we shouldn't
# claim that it supports typechecking (it doesn't.) However, not
# claiming we support typechecking is bad for this test, which
# wants to use the partial information we get from the bits of
# PyTorch which are typed to check if it typechecks. And
# although mypy will work directly if you are working in source,
# some of our tests involve installing PyTorch and then running
# its tests.
#
# The guidance we got from Michael Sullivan and Joshua Oreman,
# and also independently developed by Thomas Viehmann,
# is that we should create a fake directory and add symlinks for
# the packages that should typecheck. So that is what we do
# here.
#
# If you want to run mypy by hand, and you run from PyTorch
# root directory, it should work fine to skip this step (since
# mypy will preferentially pick up the local files first). The
# temporary directory here is purely needed for CI. For this
# reason, we also still drop the generated file in the test
# source folder, for ease of inspection when there are failures.
with tempfile.TemporaryDirectory() as tmp_dir:
try:
os.symlink(
os.path.dirname(torch.__file__),
os.path.join(tmp_dir, "torch"),
target_is_directory=True,
)
except OSError:
raise unittest.SkipTest("cannot symlink") from None
repo_rootdir = Path(__file__).resolve().parent.parent
# TODO: Would be better not to chdir here, this affects the
# entire process!
with set_cwd(str(repo_rootdir)):
(stdout, stderr, result) = mypy.api.run(
[
"--cache-dir=.mypy_cache/doc",
"--no-strict-optional", # needed because of torch.lu_unpack, see gh-36584
str(fn),
]
)
if result != 0:
self.fail(f"mypy failed:\n{stderr}\n{stdout}")
|
import unittest
from torch.testing._internal.common_utils import TestCase, run_tests, set_cwd
import tempfile
import torch
import doctest
import os
import inspect
from pathlib import Path
import mypy.api
class TestTypeHints(TestCase):
|
import doctest
import inspect
import os
import tempfile
import unittest
from pathlib import Path
import torch
from torch.testing._internal.common_utils import run_tests, set_cwd, TestCase
import mypy.api
class TestTypeHints(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
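The mypy.api.run contract relied on above, shown in isolation: it returns a (stdout, stderr, exit_status) triple rather than raising on type errors, which is why the test inspects result and calls self.fail itself.
import mypy.api

stdout, stderr, status = mypy.api.run(["--version"])
print(status, stdout.strip())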
torch
|
test/test_type_info.py
|
test_finfo
|
def test_finfo(self):
initial_default_type = torch.get_default_dtype()
for dtype in [torch.float16, torch.float32, torch.float64, torch.complex64, torch.complex128]:
x = torch.zeros((2, 2), dtype=dtype)
xinfo = torch.finfo(x.dtype)
xn = x.cpu().numpy()
xninfo = np.finfo(xn.dtype)
self.assertEqual(xinfo.bits, xninfo.bits)
self.assertEqual(xinfo.max, xninfo.max)
self.assertEqual(xinfo.min, xninfo.min)
self.assertEqual(xinfo.eps, xninfo.eps)
self.assertEqual(xinfo.tiny, xninfo.tiny)
self.assertEqual(xinfo.resolution, xninfo.resolution)
self.assertEqual(xinfo.dtype, xninfo.dtype)
if not dtype.is_complex:
torch.set_default_dtype(dtype)
self.assertEqual(torch.finfo(dtype), torch.finfo())
# Special test case for BFloat16 type
x = torch.zeros((2, 2), dtype=torch.bfloat16)
xinfo = torch.finfo(x.dtype)
self.assertEqual(xinfo.bits, 16)
self.assertEqual(xinfo.max, 3.38953e+38)
self.assertEqual(xinfo.min, -3.38953e+38)
self.assertEqual(xinfo.eps, 0.0078125)
self.assertEqual(xinfo.tiny, 1.17549e-38)
self.assertEqual(xinfo.tiny, xinfo.smallest_normal)
self.assertEqual(xinfo.resolution, 0.01)
self.assertEqual(xinfo.dtype, "bfloat16")
torch.set_default_dtype(x.dtype)
self.assertEqual(torch.finfo(x.dtype), torch.finfo())
# Restore the default type to ensure that the test has no side effect
torch.set_default_dtype(initial_default_type)
|
def test_finfo(self):
for dtype in [
torch.float16,
torch.float32,
torch.float64,
torch.complex64,
torch.complex128,
]:
x = torch.zeros((2, 2), dtype=dtype)
xinfo = torch.finfo(x.dtype)
xn = x.cpu().numpy()
xninfo = np.finfo(xn.dtype)
self.assertEqual(xinfo.bits, xninfo.bits)
self.assertEqual(xinfo.max, xninfo.max)
self.assertEqual(xinfo.min, xninfo.min)
self.assertEqual(xinfo.eps, xninfo.eps)
self.assertEqual(xinfo.tiny, xninfo.tiny)
self.assertEqual(xinfo.resolution, xninfo.resolution)
self.assertEqual(xinfo.dtype, xninfo.dtype)
if not dtype.is_complex:
with set_default_dtype(dtype):
self.assertEqual(torch.finfo(dtype), torch.finfo())
# Special test case for BFloat16 type
x = torch.zeros((2, 2), dtype=torch.bfloat16)
xinfo = torch.finfo(x.dtype)
self.assertEqual(xinfo.bits, 16)
self.assertEqual(xinfo.max, 3.38953e38)
self.assertEqual(xinfo.min, -3.38953e38)
self.assertEqual(xinfo.eps, 0.0078125)
self.assertEqual(xinfo.tiny, 1.17549e-38)
self.assertEqual(xinfo.tiny, xinfo.smallest_normal)
self.assertEqual(xinfo.resolution, 0.01)
self.assertEqual(xinfo.dtype, "bfloat16")
with set_default_dtype(x.dtype):
self.assertEqual(torch.finfo(x.dtype), torch.finfo())
# Special test case for Float8_E5M2
xinfo = torch.finfo(torch.float8_e5m2)
self.assertEqual(xinfo.bits, 8)
self.assertEqual(xinfo.max, 57344.0)
self.assertEqual(xinfo.min, -57344.0)
self.assertEqual(xinfo.eps, 0.25)
self.assertEqual(xinfo.tiny, 6.10352e-05)
self.assertEqual(xinfo.resolution, 1.0)
self.assertEqual(xinfo.dtype, "float8_e5m2")
# Special test case for Float8_E4M3FN
xinfo = torch.finfo(torch.float8_e4m3fn)
self.assertEqual(xinfo.bits, 8)
self.assertEqual(xinfo.max, 448.0)
self.assertEqual(xinfo.min, -448.0)
self.assertEqual(xinfo.eps, 0.125)
self.assertEqual(xinfo.tiny, 0.015625)
self.assertEqual(xinfo.resolution, 1.0)
self.assertEqual(xinfo.dtype, "float8_e4m3fn")
|
from torch.testing._internal.common_utils import TestCase, run_tests, TEST_NUMPY, load_tests
load_tests = load_tests
import torch
import unittest
import numpy as np
class TestDTypeInfo(TestCase):
|
from torch.testing._internal.common_utils import (
load_tests,
run_tests,
set_default_dtype,
TEST_NUMPY,
TestCase,
)
load_tests = load_tests
import sys
import unittest
import torch
import numpy as np
class TestDTypeInfo(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
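The finfo fields checked above, printed for the bfloat16 and float8 cases; the values match the assertions in the new version of the test.
import torch

for dt in (torch.bfloat16, torch.float8_e5m2, torch.float8_e4m3fn):
    fi = torch.finfo(dt)
    print(dt, fi.bits, fi.max, fi.eps, fi.tiny, fi.dtype)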
torch
|
test/test_type_info.py
|
test_to_complex
|
def test_to_complex(self):
# Regression test for https://github.com/pytorch/pytorch/issues/124868
# If reference count is leaked this would be a set of 10 elements
ref_cnt = {sys.getrefcount(torch.float32.to_complex()) for _ in range(10)}
self.assertLess(len(ref_cnt), 3)
self.assertEqual(torch.float64.to_complex(), torch.complex128)
self.assertEqual(torch.float32.to_complex(), torch.complex64)
self.assertEqual(torch.float16.to_complex(), torch.complex32)
|
from torch.testing._internal.common_utils import (
load_tests,
run_tests,
set_default_dtype,
TEST_NUMPY,
TestCase,
)
load_tests = load_tests
import sys
import unittest
import torch
import numpy as np
class TestDTypeInfo(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
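The refcount probe above, in isolation: if to_complex() leaked a reference on each call, the set of observed counts would keep growing instead of settling on one or two values (the dtype objects are interned, so the absolute count is large but should be stable).
import sys

import torch

counts = {sys.getrefcount(torch.float32.to_complex()) for _ in range(10)}
print(counts)  # expected: a small set, not 10 distinct values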
torch
|
test/test_type_promotion.py
|
float_double_default_dtype
|
def float_double_default_dtype(fn):
@wraps(fn)
def wrapped_fn(*args, **kwargs):
with set_default_dtype(torch.float):
fn(*args, **kwargs)
with set_default_dtype(torch.double):
fn(*args, **kwargs)
return wrapped_fn
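# Usage sketch (added for illustration, not in the source file): the
# decorator simply invokes the wrapped callable once per default dtype,
# so a probe would observe float32 and then float64:
#
#     @float_double_default_dtype
#     def probe():
#         seen.append(torch.get_default_dtype())  # float32, then float64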
class TestTypePromotion(TestCase):
# In-place operations don't promote.
# `int+float -> float` but `int.add_(float)` is rejected as an error.
# Promoting inplace would require re-allocating and copying the memory of the
# tensor data, since element size could change.
@float_double_default_dtype
def test_inplace(self, device):
int_tensor = torch.ones([4, 4, 4], dtype=torch.int32, device=device)
self.assertRaisesRegex(RuntimeError, "can't be cast to", lambda: int_tensor.add_(1.5))
expected = torch.ones([4, 4, 4], dtype=torch.int32, device=device)
long_tensor = torch.ones([4, 4, 4], dtype=torch.int64, device=device)
int_tensor.add_(long_tensor)
int_tensor.add_(1)
three = expected + 2
self.assertEqual(int_tensor, three)
self.assertEqual(int_tensor.dtype, torch.int32)
bool_tensor = torch.tensor([1, 1, 1], dtype=torch.bool, device=device)
uint8_tensor = torch.tensor([1, 1, 1], dtype=torch.uint8, device=device)
# We treat bool as a separate category, which means uint8 cannot cast to bool.
self.assertRaisesRegex(RuntimeError, "can't be cast to", lambda: bool_tensor.add_(uint8_tensor))
# We allow demotion from signed to unsigned, unlike numpy, because:
# * We don't want the performance penalty of inspecting scalar values.
# * We don't want 'signed' to be considered a distinct 'category'
# in promotion rules.
# We don't want signed to be a separate category because if it was,
# uint16_tensor + 5 would result in a long_tensor, which is not what we want.
int16_tensor = torch.tensor([1, 1, 1], dtype=torch.int16, device=device)
uint8_tensor *= int16_tensor
@float_double_default_dtype
def test_unsigned(self, device):
dont_promote = torch.ones(3, dtype=torch.uint8, device=device) + 5
self.assertEqual(dont_promote.dtype, torch.uint8)
# some basic examples
@float_double_default_dtype
def test_int_promotion(self, device):
a = torch.ones([4, 4, 4], dtype=torch.int32, device=device)
b = torch.ones([4, 4, 4], dtype=torch.int64, device=device)
c = a + b
self.assertEqual(c, b + b)
self.assertEqual(c.dtype, torch.int64)
@float_double_default_dtype
def test_float_promotion(self, device):
def test_promotion(dtype_float, dtype_double):
a = torch.ones([4, 4, 4], dtype=dtype_float, device=device)
b = torch.ones([4, 4, 4], dtype=dtype_double, device=device)
c = a + b
self.assertEqual(c, b + b)
self.assertEqual(c.dtype, dtype_double)
c = b + a
self.assertEqual(c, b + b)
self.assertEqual(c.dtype, dtype_double)
test_promotion(torch.float, torch.double)
@float_double_default_dtype
def test_complex_promotion(self, device):
def test_promotion(dtype_float, dtype_double):
a = torch.ones([4, 4, 4], dtype=dtype_float, device=device)
b = torch.ones([4, 4, 4], dtype=dtype_double, device=device)
c = a + b
self.assertEqual(c, b + b)
self.assertEqual(c.dtype, dtype_double)
c = b + a
self.assertEqual(c, b + b)
self.assertEqual(c.dtype, dtype_double)
test_promotion(torch.complex64, torch.complex128)
a = torch.randn(3, dtype=torch.complex64, device=device)
self.assertEqual((a * 5).dtype, torch.complex64)
# not a "wrapped number"
other = torch.tensor(5.5, dtype=torch.double, device=device)
self.assertEqual((a + other).dtype, torch.complex64)
def make_scalar_tensor(dtype):
return make_tensor((), dtype=dtype, device=device)
def make_1d_tensor(dtype):
return make_tensor((3,), dtype=dtype, device=device)
def complex_scalar_tensor_test(s, t):
# As per type promotion rules,
# Complex Scalar and Float Tensor -> Complex Tensor with Value type of Float Tensor
# Complex Scalar and Integral Tensor -> Complex Tensor with Value type of Complex Scalar
if t.dtype.is_floating_point:
# defaults to return complex64 (for bfloat16)
expected_dtype = float_to_corresponding_complex_type_map.get(t.dtype, torch.complex64)
else: # integral tensor
if isinstance(s, torch.Tensor):
expected_dtype = s.dtype
else:
expected_dtype = float_to_corresponding_complex_type_map[torch.get_default_dtype()]
self.assertEqual((s * t).dtype, expected_dtype)
self.assertEqual((t * s).dtype, expected_dtype)
self.assertEqual(torch.result_type(s, t), expected_dtype)
self.assertEqual(torch.result_type(t, s), expected_dtype)
if torch.device(device).type != 'xla':
# chalf is not supported on XLA
s = make_scalar_tensor(dtype=torch.chalf)
# Same Value type
t = make_1d_tensor(dtype=torch.half)
# 0-D Tensor X 1-D Tensor
complex_scalar_tensor_test(s, t)
# Python Scalar X 1-D Tensor
complex_scalar_tensor_test(s.item(), t)
# Higher Value Type
t = make_1d_tensor(dtype=torch.float)
complex_scalar_tensor_test(s, t)
complex_scalar_tensor_test(s.item(), t)
# Special Case
t = make_1d_tensor(dtype=torch.bfloat16)
complex_scalar_tensor_test(s, t)
complex_scalar_tensor_test(s.item(), t)
# Integral Tensor
t = make_1d_tensor(dtype=torch.long)
complex_scalar_tensor_test(s, t)
complex_scalar_tensor_test(s.item(), t)
# CFloat Scalar
s = make_scalar_tensor(dtype=torch.cfloat)
# Lower Value type than CFloat
t = make_1d_tensor(dtype=torch.half)
complex_scalar_tensor_test(s, t)
complex_scalar_tensor_test(s.item(), t)
# Higher Value type than CFloat
t = make_1d_tensor(dtype=torch.double)
complex_scalar_tensor_test(s, t)
complex_scalar_tensor_test(s.item(), t)
# Integral Tensor
t = make_1d_tensor(dtype=torch.long)
# 0-D Tensor X 1-D Tensor
complex_scalar_tensor_test(s, t)
# Python Scalar X 1-D Tensor
complex_scalar_tensor_test(s.item(), t)
# CDouble Scalar
s = make_scalar_tensor(dtype=torch.cdouble)
# Lower Value type than CDouble
t = make_1d_tensor(dtype=torch.float)
complex_scalar_tensor_test(s, t)
complex_scalar_tensor_test(s.item(), t)
# Special Case
t = make_1d_tensor(dtype=torch.bfloat16)
complex_scalar_tensor_test(s, t)
complex_scalar_tensor_test(s.item(), t)
@float_double_default_dtype
def test_complex_scalar_mult_tensor_promotion(self, device):
a = 1j * torch.ones(2, device=device)
a = a + 1j
b = torch.tensor([2j, 2j], device=device)
self.assertEqual(a, b)
self.assertEqual(a.dtype, b.dtype)
@float_double_default_dtype
def test_add_wrapped(self, device):
a = torch.ones([4, 4, 4], dtype=torch.int, device=device)
b = 1
c = a + b
self.assertEqual(c, a + a)
self.assertEqual(c.dtype, torch.int)
@float_double_default_dtype
def test_int_to_float(self, device):
a = torch.ones([4, 4, 4], dtype=torch.int32, device=device)
b = torch.ones([4, 4, 4], dtype=torch.float, device=device)
c = a + b
self.assertEqual(c.dtype, torch.float32)
# some examples from:
# https://github.com/pytorch/pytorch/issues/9515
@float_double_default_dtype
def test_from_issue(self, device):
a = torch.rand(3, dtype=torch.float32, device=device)
u = torch.tensor([0, 0, 1], dtype=torch.uint8, device=device)
self.assertEqual((a * 5).dtype, torch.float32)
self.assertEqual((u + 1).dtype, torch.uint8)
self.assertEqual((u + 1000).dtype, torch.uint8) # integer overflow
# not a "wrapped number"
other = torch.tensor(5.5, dtype=torch.double, device=device)
self.assertEqual((u + 5.5).dtype, torch.get_default_dtype())
self.assertEqual((u + other).dtype, torch.double)
# adding a 0-dim tensor to a float doesn't promote to double unless first
# type was integral.
self.assertEqual((a + other).dtype, torch.float32)
@float_double_default_dtype
def test_half(self, device):
half = torch.tensor(5.5, dtype=torch.float16, device=device)
self.assertEqual((half + 2.2).dtype, torch.float16)
self.assertEqual((half + 100000).dtype, torch.float16) # inf
default_tensor = torch.tensor(100000.0, device=device)
self.assertEqual((half + default_tensor).dtype, torch.get_default_dtype())
def test_bfloat16(self, device):
# with scalar
bf = torch.tensor(5.5, dtype=torch.bfloat16, device=device)
for scalar in (2.2, 5, 100000): # bf + 100000 is inf
self.assertEqual((bf + scalar).dtype, torch.bfloat16)
self.assertEqual(scalar + bf, bf + scalar)
for scalar in (complex(1, 1), complex(-2, 0), complex(0, -3)):
self.assertEqual((bf + scalar).dtype, torch.cfloat)
self.assertEqual(bf + scalar, scalar + bf)
# with tensor
for dtype in all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool):
t = torch.tensor(1, dtype=dtype, device=device)
self.assertEqual(bf + t, t + bf)
if dtype in (torch.float16, torch.float32, torch.float64, torch.cfloat, torch.cdouble):
# Handles bfloat16 x float16 -> float32 promotion
expected_dtype = dtype if dtype != torch.half else torch.float32
elif dtype is torch.chalf:
expected_dtype = torch.cfloat
elif dtype in (torch.bool, torch.uint8,
torch.int8, torch.int16, torch.int32, torch.int64, torch.bfloat16):
expected_dtype = torch.bfloat16
else:
raise AssertionError(f'Missing dtype {dtype} not tested.')
self.assertEqual(torch.promote_types(dtype, torch.bfloat16), expected_dtype)
self.assertEqual(torch.promote_types(torch.bfloat16, dtype), expected_dtype)
self.assertEqual((bf + t).dtype, expected_dtype)
@onlyNativeDeviceTypes
def test_complex_half(self, device):
# with scalar
chalf = torch.tensor(5.5, dtype=torch.chalf, device=device)
for scalar in (2.2, 5, 100000): # chalf + 100000 is inf
self.assertEqual((chalf * scalar).dtype, torch.chalf)
self.assertEqual(scalar * chalf, chalf * scalar)
for scalar in (complex(1, 1), complex(-2, 0), complex(0, -3)):
self.assertEqual((chalf * scalar).dtype, torch.chalf)
self.assertEqual(chalf * scalar, scalar * chalf)
# with tensor
dtypes = all_types_and_complex_and(torch.chalf, torch.half, torch.bfloat16, torch.bool)
for dtype in dtypes:
t = torch.tensor(1, dtype=dtype, device=device)
self.assertEqual(chalf * t, t * chalf)
if dtype in (torch.float16, torch.chalf):
expected_dtype = torch.chalf
elif dtype in (torch.float, torch.double, torch.bfloat16):
expected_dtype = torch.cdouble if dtype is torch.double else torch.cfloat
elif dtype in (torch.cfloat, torch.cdouble):
expected_dtype = dtype
elif dtype in (torch.bool, torch.uint8,
torch.int8, torch.int16, torch.int32, torch.int64):
expected_dtype = torch.chalf
else:
raise AssertionError(f'Missing dtype {dtype} not tested.')
self.assertEqual(torch.promote_types(dtype, torch.chalf), expected_dtype)
self.assertEqual(torch.promote_types(torch.chalf, dtype), expected_dtype)
self.assertEqual((chalf * t).dtype, expected_dtype)
@float_double_default_dtype
def test_alternate_result(self, device):
x = torch.tensor([1, 1, 1, 1], dtype=torch.float, device=device)
o = torch.tensor([0, 0, 0, 0], dtype=torch.long, device=device)
self.assertRaisesRegex(RuntimeError,
"can't be cast to",
lambda: torch.add(x, x, out=o))
d = torch.tensor([1, 1, 1, 1], dtype=torch.double, device=device)
torch.add(x, x, out=d)
self.assertEqual(d.dtype, torch.double)
x = x.to(torch.double)
self.assertEqual(x + x, d)
@float_double_default_dtype
def test_mixed_type_backward(self, device):
f = torch.ones([3, 3], dtype=torch.float, requires_grad=True, device=device)
ten = torch.tensor([10.], dtype=torch.double, device=device)
tens = f * ten
s = (tens + 2).sum()
s.backward()
expected = f.grad.to(torch.double)
self.assertEqual(tens, expected)
# If we don't convert the returned grad_input to the actual input type
# we get an error like:
# RuntimeError: Function SubBackward0 returned an invalid gradient at index 0 - expected type \
# torch.FloatTensor but got torch.DoubleTensor
f_dtypes = [torch.float, torch.double]
if self.device_type == 'cuda':
f_dtypes = f_dtypes + [torch.half]
i_dtypes = [torch.int, torch.long]
for func in [torch.add, torch.sub, torch.rsub, torch.mul, torch.div]:
for dtype1, dtype2 in itertools.product(f_dtypes, f_dtypes + i_dtypes):
x = torch.ones(10, requires_grad=True, dtype=dtype1, device=device)
y = torch.ones(10, dtype=dtype2, device=device)
func(x, y).sum().backward()
def _get_test_tensor(self, device, dtype, remove_zeros=False):
shape = [5, 5, 5]
if dtype == torch.bool:
tensor = torch.randint(int(remove_zeros), 2, shape, device=device, dtype=dtype)
elif dtype.is_floating_point or dtype.is_complex:
# "_th_normal_ not supported on CPUType for Half" so simpler create and convert
tensor = torch.randn(shape, device=device)
tensor = tensor.to(dtype)
if remove_zeros:
tensor[torch.abs(tensor) < 0.05] = 5
else:
tensor = torch.randint(-5 if dtype.is_signed else 0, 10, shape, device=device, dtype=dtype)
if remove_zeros:
tensor[tensor == 0] = 5
return tensor
# verifies that torch.<op>(first, second) is the same as
# torch.<op>(first.to(common_dtype), second.to(common_dtype)) in cases where that should hold.
@float_double_default_dtype
def test_many_promotions(self, device):
# Can also include half on CPU in cases where it will be promoted to a
# supported dtype
dtypes1 = get_all_math_dtypes('cuda')
dtypes2 = get_all_math_dtypes(device)
ops = [torch.add, torch.sub, torch.mul, torch.div, torch.rsub]
for dt1, dt2 in itertools.product(dtypes1, dtypes2):
for op, non_contiguous in itertools.product(ops, [True, False]):
common_dtype = torch.promote_types(dt1, dt2)
if common_dtype == torch.half and self.device_type == 'cpu':
continue
if op == torch.sub and common_dtype != torch.bool:
# Subtraction, the `-` operator, with a bool tensor is not supported.
continue
first = self._get_test_tensor(device, dt1)
second = self._get_test_tensor(device, dt2, op == torch.div)
# test ops with non-contiguous tensors
if non_contiguous:
first = first.transpose(0, 2)
second = second.transpose(2, 1)
self.assertNotEqual(first.stride(), second.stride(),
msg="some non-contiguous issues could be missed if tensors have same strides")
self.assertEqual(not first.is_contiguous(), non_contiguous)
self.assertEqual(not second.is_contiguous(), non_contiguous)
result = op(first, second)
expected = op(first.to(common_dtype), second.to(common_dtype))
self.assertEqual(result.dtype, expected.dtype, msg='{} with {}, {}'.format(op.__name__, dt1, dt2))
self.assertEqual(result, expected, msg='{} with {}, {}'.format(op.__name__, dt1, dt2))
@float_double_default_dtype
def test_non_promoting_ops(self, device):
x = torch.ones(4, dtype=torch.double, device=device)
with self.assertRaises(RuntimeError):
torch.lerp(x, torch.ones(4, dtype=torch.float, device=device), 1)
@float_double_default_dtype
def test_alpha_mismatch(self, device):
x = torch.ones(4, dtype=torch.int, device=device)
err = 'alpha must not be'
self.assertRaisesRegex(RuntimeError, err,
lambda: torch.add(x, x, alpha=1.1))
x = x.to(torch.bool)
self.assertRaisesRegex(RuntimeError, err,
lambda: torch.add(x, x, alpha=1.1))
self.assertEqual(x + x, torch.add(x, x, alpha=True))
@float_double_default_dtype
def test_booleans(self, device):
onedim = torch.tensor([True], device=device)
self.assertEqual(onedim + onedim, onedim)
self.assertEqual(onedim + True, onedim)
self.assertEqual(torch.add(True, True), True)
self.assertEqual(torch.add(False, False), False)
self.assertEqual(torch.add(False, True), True)
self.assertRaisesRegex(RuntimeError, "Boolean alpha only supported",
lambda: torch.add(1, 1, alpha=True))
self.assertEqual(torch.add(torch.tensor(True, device=device),
torch.tensor(True, device=device), True),
torch.tensor(True, device=device))
@skipIfTorchDynamo("Not a TorchDynamo suitable test")
@float_double_default_dtype
def test_create_bool_tensors(self, device):
expected = torch.tensor([0], dtype=torch.int64, device=device)
self.assertEqual(torch.arange(False, True, device=device), expected)
self.assertEqual(torch.arange(True, device=device), expected)
expected = torch.tensor([0, 0.5], dtype=torch.get_default_dtype(), device=device)
self.assertEqual(torch.arange(False, True, 0.5, device=device), expected)
expected = torch.ones(0, dtype=torch.int64, device=device)
self.assertEqual(torch.arange(False, False, device=device), expected)
bool_tensor_lin = torch.linspace(False, True, steps=100, device=device)
int_tensor_lin = torch.linspace(0, 1, steps=100, device=device)
self.assertEqual(bool_tensor_lin, int_tensor_lin)
bool_tensor_log = torch.logspace(False, True, steps=100, device=device)
int_tensor_log = torch.logspace(0, 1, steps=100, device=device)
self.assertEqual(bool_tensor_log, int_tensor_log)
# This seems like odd behavior, but ints also create float tensors; numpy doesn't have this function.
self.assertEqual(torch.scalar_tensor(False, device=device), torch.tensor(0., device=device))
@dtypes(*itertools.product(all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool),
all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool)))
def test_result_type(self, device, dtypes):
"Test result_type for tensor vs tensor and scalar vs scalar."
def _get_dtype(x):
"Get the dtype of x if x is a tensor. If x is a scalar, get its corresponding dtype if it were a tensor."
if torch.is_tensor(x):
return x.dtype
elif isinstance(x, bool):
return torch.bool
elif isinstance(x, int):
return torch.int64
elif isinstance(x, float):
return torch.float32
elif isinstance(x, complex):
return torch.complex64
else:
raise AssertionError(f"Unknown type {x}")
# tensor against tensor
a_tensor = torch.tensor((0, 1), device=device, dtype=dtypes[0])
a_single_tensor = torch.tensor(1, device=device, dtype=dtypes[0])
a_scalar = a_single_tensor.item()
b_tensor = torch.tensor((1, 0), device=device, dtype=dtypes[1])
b_single_tensor = torch.tensor(1, device=device, dtype=dtypes[1])
b_scalar = b_single_tensor.item()
combo = ((a_tensor, a_single_tensor, a_scalar), (b_tensor, b_single_tensor, b_scalar))
for a, b in itertools.product(*combo):
dtype_a = _get_dtype(a)
dtype_b = _get_dtype(b)
try:
result = a + b
except RuntimeError:
with self.assertRaises(RuntimeError):
torch.promote_types(dtype_a, dtype_b)
with self.assertRaises(RuntimeError):
torch.result_type(a, b)
else:
dtype_res = _get_dtype(result)
if a is a_scalar and b is b_scalar and dtype_a == torch.bool and dtype_b == torch.bool:
# special case: in Python, True + True is an integer
self.assertEqual(dtype_res, torch.int64, f"a == {a}, b == {b}")
else:
self.assertEqual(dtype_res, torch.result_type(a, b), f"a == {a}, b == {b}")
if a is a_scalar and b is b_scalar: # Python internal type determination is good enough in this case
continue
if any(a is a0 and b is b0 for a0, b0 in zip(*combo)): # a and b belong to the same class
self.assertEqual(dtype_res, torch.promote_types(dtype_a, dtype_b), f"a == {a}, b == {b}")
# Spot check some result type for tensor against scalar (including single-element tensor).
@float_double_default_dtype
def test_result_type_tensor_vs_scalar(self, device):
def _test_spot(a, b, res_dtype):
self.assertEqual(torch.result_type(a, b), res_dtype)
self.assertEqual(torch.result_type(b, a), res_dtype)
_test_spot(torch.tensor([1, 2], dtype=torch.half, device=device),
torch.tensor(1, dtype=torch.long, device=device), torch.half)
_test_spot(torch.tensor(1, dtype=torch.float, device=device),
torch.tensor([1, 2], dtype=torch.double, device=device), torch.double)
_test_spot(torch.tensor(1, dtype=torch.int, device=device), 1, torch.int)
_test_spot(torch.tensor(1, device=device), 1., torch.get_default_dtype())
_test_spot(torch.tensor(1, dtype=torch.long, device=device),
torch.tensor([1, 1], dtype=torch.int, device=device), torch.int)
_test_spot(torch.tensor([1., 1.], dtype=torch.float, device=device), 1., torch.float)
_test_spot(torch.tensor([1., 1.], dtype=torch.complex64, device=device),
torch.tensor(1., dtype=torch.complex128, device=device), torch.complex64)
_test_spot(torch.tensor([1., 1.], dtype=torch.complex128, device=device),
torch.tensor(1., dtype=torch.complex64, device=device), torch.complex128)
_test_spot(torch.tensor([1, 1], dtype=torch.bool, device=device), 1., torch.get_default_dtype())
@float_double_default_dtype
def test_can_cast(self, device):
self.assertTrue(torch.can_cast(torch.double, torch.float))
self.assertFalse(torch.can_cast(torch.float, torch.int))
@float_double_default_dtype
def test_comparison_ops_with_type_promotion(self, device):
value_for_type = {
torch.uint8: (1 << 5),
torch.int8: (1 << 5),
torch.int16: (1 << 10),
torch.int32: (1 << 20),
torch.int64: (1 << 35),
torch.float16: (1 << 10),
torch.float32: (1 << 20),
torch.float64: (1 << 35),
torch.complex64: (1 << 20),
torch.complex128: (1 << 35)
}
comparison_ops = [
dict(
name="lt",
out_op=lambda x, y, d: torch.lt(x, y, out=torch.empty(0, dtype=torch.bool, device=d)),
ret_op=lambda x, y: torch.lt(x, y),
compare_op=lambda x, y: x < y,
),
dict(
name="le",
out_op=lambda x, y, d: torch.le(x, y, out=torch.empty(0, dtype=torch.bool, device=d)),
ret_op=lambda x, y: torch.le(x, y),
compare_op=lambda x, y: x <= y,
),
dict(
name="gt",
out_op=lambda x, y, d: torch.gt(x, y, out=torch.empty(0, dtype=torch.bool, device=d)),
ret_op=lambda x, y: torch.gt(x, y),
compare_op=lambda x, y: x > y,
),
dict(
name="ge",
out_op=lambda x, y, d: torch.ge(x, y, out=torch.empty(0, dtype=torch.bool, device=d)),
ret_op=lambda x, y: torch.ge(x, y),
compare_op=lambda x, y: x >= y,
),
dict(
name="eq",
out_op=lambda x, y, d: torch.eq(x, y, out=torch.empty(0, dtype=torch.bool, device=d)),
ret_op=lambda x, y: torch.eq(x, y),
compare_op=lambda x, y: x == y,
),
dict(
name="ne",
out_op=lambda x, y, d: torch.ne(x, y, out=torch.empty(0, dtype=torch.bool, device=d)),
ret_op=lambda x, y: torch.ne(x, y),
compare_op=lambda x, y: x != y,
),
]
for op in comparison_ops:
for dt1 in get_all_math_dtypes(device):
for dt2 in get_all_math_dtypes(device):
if (dt1.is_complex or dt2.is_complex) and not (op["name"] == "eq" or op["name"] == "ne"):
continue
val1 = value_for_type[dt1]
val2 = value_for_type[dt2]
t1 = torch.tensor([val1], dtype=dt1, device=device)
t2 = torch.tensor([val2], dtype=dt2, device=device)
expected = torch.tensor([op["compare_op"](val1, val2)], dtype=torch.bool)
out_res = op["out_op"](t1, t2, device)
self.assertEqual(out_res, expected)
self.assertTrue(out_res.dtype == torch.bool)
self.assertTrue(t1.dtype == dt1)
self.assertTrue(t2.dtype == dt2)
out_res = op["ret_op"](t1, t2)
self.assertEqual(out_res, expected)
self.assertTrue(out_res.dtype == torch.bool)
self.assertTrue(t1.dtype == dt1)
self.assertTrue(t2.dtype == dt2)
# test that comparing a zero dim tensor with another zero dim tensor has type promotion behavior
t1 = torch.tensor(val1, dtype=dt1, device=device)
t2 = torch.tensor(val2, dtype=dt2, device=device)
expected = torch.tensor(op["compare_op"](val1, val2), dtype=torch.bool)
out_res = op["out_op"](t1, t2, device)
self.assertEqual(out_res, expected)
self.assertTrue(out_res.dtype == torch.bool)
self.assertTrue(t1.dtype == dt1)
self.assertTrue(t2.dtype == dt2)
out_res = op["ret_op"](t1, t2)
self.assertEqual(out_res, expected)
self.assertTrue(out_res.dtype == torch.bool)
self.assertTrue(t1.dtype == dt1)
self.assertTrue(t2.dtype == dt2)
# XLA tests fail for self.assertRaises for complex dtypes
@onlyNativeDeviceTypes
def test_complex_assertraises(self, device):
comparison_ops = [
dict(name="lt", compare_op=lambda x, y: x < y, ),
dict(name="le", compare_op=lambda x, y: x <= y, ),
dict(name="gt", compare_op=lambda x, y: x > y, ),
dict(name="ge", compare_op=lambda x, y: x >= y, ),
dict(name="eq", compare_op=lambda x, y: x == y, ),
dict(name="ne", compare_op=lambda x, y: x != y, ),
]
for op in comparison_ops:
is_cuda = torch.device(device).type == 'cuda'
dtypes = get_all_dtypes(include_half=is_cuda,
include_bfloat16=False, include_bool=False,
include_complex32=True)
for dt1, dt2 in itertools.product(dtypes, dtypes):
if (dt1.is_complex or dt2.is_complex) and not (op["name"] == "eq" or op["name"] == "ne"):
u = torch.tensor([1], dtype=dt1, device=device)
v = torch.tensor([2], dtype=dt2, device=device)
self.assertRaises(RuntimeError, lambda: torch.tensor([op["compare_op"](u, v)], dtype=torch.bool))
@float_double_default_dtype
def test_lt_with_type_promotion(self, device):
for dt in get_all_math_dtypes(device):
x = torch.tensor([0], dtype=dt, device=device)
expected = torch.tensor([True], dtype=torch.bool, device=device)
if dt.is_complex:
continue
actual = x < 0.5
self.assertEqual(actual, expected)
self.assertTrue(actual.dtype == torch.bool)
actual = x < torch.tensor(0.5, device=device)
self.assertEqual(actual, expected)
self.assertTrue(actual.dtype == torch.bool)
x = torch.tensor(0, dtype=dt, device=device)
expected = torch.tensor(True, dtype=torch.bool, device=device)
actual = x < 0.5
self.assertEqual(actual, expected)
self.assertTrue(actual.dtype == torch.bool)
actual = x < torch.tensor(0.5, device=device)
self.assertEqual(actual, expected)
self.assertTrue(actual.dtype == torch.bool)
@float_double_default_dtype
def test_promote_types(self, device):
self.assertEqual(torch.promote_types(torch.float, torch.int), torch.float)
self.assertEqual(torch.promote_types(torch.float, torch.double), torch.double)
self.assertEqual(torch.promote_types(torch.int, torch.uint8), torch.int)
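# Promotion can land on a dtype neither input has when the value ranges
# don't nest (editorial sketch, per the documented promotion matrix):
#   >>> torch.promote_types(torch.uint8, torch.int8)
#   torch.int16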
@float_double_default_dtype
def test_promote_self(self, device):
for dtype in all_types_and_complex_and(torch.half, torch.bfloat16, torch.chalf, torch.bool):
self.assertEqual(torch.promote_types(dtype, dtype), dtype)
@expectedFailureMeta
@float_double_default_dtype
def test_indexing_fail(self, device):
# https://github.com/pytorch/pytorch/issues/28010
a = torch.ones(5, 2, dtype=torch.double, device=device)
b = torch.zeros(5, dtype=torch.int, device=device)
with self.assertRaises(RuntimeError):
a[:, [1]] = b.unsqueeze(-1)
@float_double_default_dtype
def test_indexing(self, device):
x = torch.ones(5, 2, dtype=torch.double, device=device)
y = torch.zeros(5, dtype=torch.double, device=device)
x[:, [1]] = y.unsqueeze(-1)
expected = torch.tensor([(1, 0), (1, 0), (1, 0), (1, 0), (1, 0)], dtype=torch.double, device=device)
self.assertEqual(x, expected)
# https://github.com/pytorch/pytorch/issues/27824
tmp = torch.ones(9, 9, dtype=torch.float, device=device)
mask = torch.ones(10, 10, dtype=torch.uint8, device=device)
result = tmp + mask[1:, 1:]
expected = torch.full([9, 9], 2., dtype=torch.float, device=device)
self.assertEqual(result, expected)
@float_double_default_dtype
def test_transpose(self, device):
# https://github.com/pytorch/pytorch/issues/28502
a = torch.tensor([[True, True], [False, True]], device=device)
self.assertEqual(a.t() == 0, a.t() == False) # noqa: E712
@dtypes(torch.bool, torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
@float_double_default_dtype
def test_div_promotion(self, device, dtype):
for op in (torch.div, torch.true_divide):
dividend = (torch.randn(5, device=device) * 100).to(dtype)
divisor = torch.arange(1, 6, device=device).to(dtype)
# Tests tensor/tensor division
casting_result = dividend.to(torch.get_default_dtype()) / divisor.to(torch.get_default_dtype())
self.assertEqual(casting_result, op(dividend, divisor))
# Tests tensor/scalar division
casting_result = dividend.to(torch.get_default_dtype()) / 2
self.assertEqual(casting_result, op(dividend, 2.))
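# The consequence pinned down above: true division of integral inputs always
# yields the current default float dtype, never an integral quotient
# (editorial sketch; assumes the float32 default):
#   >>> torch.div(torch.tensor([5]), torch.tensor([2]))
#   tensor([2.5000])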
@onlyNativeDeviceTypes
@dtypes(torch.float, torch.double,
torch.bool, torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
def test_div_promotion_out(self, device, dtype):
for op in (torch.div, torch.true_divide):
dividend = (torch.randn(5, device=device) * 100).to(dtype)
divisor = torch.arange(1, 6, device=device).to(dtype)
# Tests that requests for an integer quotient fail
if not dtype.is_floating_point:
integral_quotient = torch.empty(5, device=device, dtype=dtype)
with self.assertRaises(RuntimeError):
op(dividend, divisor, out=integral_quotient)
with self.assertRaises(RuntimeError):
op(dividend, 2, out=integral_quotient)
else:
# Tests that requests for a floating quotient succeed
floating_quotient = torch.empty(5, device=device, dtype=dtype)
div_result = dividend / divisor
self.assertEqual(div_result,
op(dividend, divisor, out=floating_quotient))
self.assertEqual(dividend / 2,
op(dividend, 2, out=floating_quotient))
@dtypes(torch.float, torch.double,
torch.bool, torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
def test_div_promotion_inplace(self, device, dtype):
for op in (torch.Tensor.div_, torch.Tensor.true_divide_):
dividend = (torch.randn(5, device=device) * 100).to(dtype)
divisor = torch.arange(1, 6, device=device).to(dtype)
# Tests that requests for an integer quotient fail
if not dtype.is_floating_point:
with self.assertRaises(RuntimeError):
op(dividend, divisor)
with self.assertRaises(RuntimeError):
op(dividend, 2)
else:
# Tests that requests for a floating quotient succeed
div_result = dividend.clone().div_(divisor)
self.assertEqual(div_result, op(dividend.clone(), divisor))
self.assertEqual(dividend.clone().div_(2), op(dividend.clone(), 2))
def _test_sparse_op_input_tensors(self, device, dtype, coalesced, zeros=True):
t = self._get_test_tensor(device, dtype, not zeros)
if zeros and dtype != torch.bool:
# ensure sparsity. Bool should already have sufficient sparsity.
mask = self._get_test_tensor(device, torch.bool)
t = t * mask
if coalesced:
s = t.to_sparse()
else:
s = t.to_sparse()
indices = torch.cat((s.indices(), s.indices()), 1)
values = torch.cat((s.values(), s.values()), 0)
s = torch.sparse_coo_tensor(indices=indices, values=values, size=s.size(), dtype=dtype, device=device)
t = s.to_dense()
self.assertEqual(s.is_coalesced(), coalesced)
self.assertEqual(s.dtype, dtype)
self.assertEqual(t.dtype, s.dtype)
return t, s
def _get_precision(self, dtype, coalesced):
if dtype == torch.half and not coalesced:
# very low precision for uncoalesced float16 sparse tensors since
# ops like (s1 + s2).to_dense() will add four low-precision
# floating point values.
return 5e-2
if dtype == torch.half:
return 1e-3
# uses default
return None
def _test_sparse_op(self, op_name, inplace, dtype1, dtype2, device, coalesced):
if dtype1.is_complex or dtype2.is_complex:
return
suffix = '_' if inplace else ''
err = "{} {}({}, {})".format(" coalesced" if coalesced else "uncoalesced", op_name + suffix, dtype1, dtype2)
def op(t1, t2, suf=None):
suf = suffix if suf is None else suf
return getattr(t1, op_name + suf)(t2)
add_sub = op_name == 'add' or op_name == 'sub'
(dense1, sparse1) = self._test_sparse_op_input_tensors(device, dtype1, coalesced)
(dense2, sparse2) = self._test_sparse_op_input_tensors(device, dtype2, coalesced, op_name != 'div')
common_dtype = torch.result_type(dense1, dense2)
if self.device_type == 'cpu' and common_dtype == torch.half:
self.assertRaises(RuntimeError, lambda: op(sparse1, dense2))
# Skip inplace tests that would fail due to inability to cast to the output type.
# Some of these would also raise errors due to not being a supported op.
if inplace and not torch.can_cast(common_dtype, dtype1):
self.assertRaises(RuntimeError, lambda: op(dense1, sparse2))
self.assertRaises(RuntimeError, lambda: op(sparse1, sparse2))
self.assertRaises(RuntimeError, lambda: op(sparse1, dense2))
return
expected = op(dense1.clone(), dense2)
precision = self._get_precision(expected.dtype, coalesced)
rtol = None if precision is None else 0
test_tensors = [expected, dense1, sparse1, dense2, sparse2]
e, d1, s1, d2, s2 = [x.clone() for x in test_tensors] if inplace else test_tensors
# Test op(sparse, sparse)
if op_name != 'div':
sparse = op(s1, s2)
self.assertEqual(sparse.dtype, e.dtype)
self.assertEqual(e, sparse.to_dense(), atol=precision, rtol=rtol, msg=err)
else:
# sparse division only supports division by a scalar
self.assertRaises(RuntimeError, lambda: op(s1, s2).to_dense())
# Test op(dense, sparse)
if add_sub or op_name == 'mul':
if inplace:
e, d1, s1, d2, s2 = [x.clone() for x in test_tensors]
dense_sparse = op(d1, s2)
dense_sparse = dense_sparse.to_dense() if dense_sparse.is_sparse else dense_sparse
self.assertEqual(e, dense_sparse, atol=precision, rtol=rtol, msg=err)
else:
# sparse division only supports division by a scalar
# mul: Didn't find kernel to dispatch to for operator 'aten::_nnz'
self.assertRaises(RuntimeError, lambda: op(d1, s2))
# Test op(sparse, dense) not supported for all ops but 'mul'.
# add(sparse, dense) is not supported. Use add(dense, sparse) instead.
# sparse division only supports division by a scalar
if op_name != 'mul':
self.assertRaises(RuntimeError, lambda: op(s1, d2))
else:
# No type promotions for inplace operations, hence suf=''
op(s1, d2, suf='')
# Test op(sparse, scalar)
if not add_sub and not (self.device_type == 'cpu' and dtype1 == torch.half):
if inplace:
e, d1, s1, d2, s2 = [x.clone() for x in test_tensors]
scalar = d2.view(d2.numel())[0].item()
sparse = op(s1, scalar)
dense_scalar = op(d1, scalar)
self.assertEqual(sparse.dtype, dense_scalar.dtype)
self.assertEqual(dense_scalar, sparse.to_dense(), atol=precision, rtol=rtol, msg=err)
else:
# add(sparse, dense) is not supported. Use add(dense, sparse) instead.
# "mul_cpu" / "div_cpu" not implemented for 'Half'
self.assertRaises(RuntimeError, lambda: op(s1, d2.view(d2.numel())[0].item()))
def _run_all_tests_for_sparse_op(self, op_name, device, dtypes):
for dtype1, dtype2 in itertools.product(dtypes, dtypes):
for inplace, coalesced in itertools.product([True, False], [True, False]):
self._test_sparse_op(op_name, inplace, dtype1, dtype2, device, coalesced)
@onlyNativeDeviceTypes
def test_sparse_add(self, device):
self._run_all_tests_for_sparse_op('add', device,
dtypes=get_all_math_dtypes(device))
@onlyNativeDeviceTypes
def test_sparse_mul(self, device):
self._run_all_tests_for_sparse_op('mul', device,
dtypes=get_all_math_dtypes(device))
@onlyNativeDeviceTypes
def test_sparse_div(self, device):
self._run_all_tests_for_sparse_op('div', device,
dtypes=(torch.float32, torch.float64,
torch.complex64, torch.complex128))
@onlyNativeDeviceTypes
def test_sparse_sub(self, device):
self._run_all_tests_for_sparse_op('sub', device,
dtypes=get_all_math_dtypes(device))
@onlyNativeDeviceTypes
@dtypes(torch.bool, torch.short, torch.uint8, torch.int, torch.long)
@float_double_default_dtype
def test_sparse_div_promotion(self, device, dtype):
for op in (torch.div, torch.true_divide):
dividend = torch.randn(5, device=device).to(dtype)
divisor = 2
dividend_sparse = dividend.to_sparse()
casting_result = dividend.to(torch.get_default_dtype()) / 2
self.assertEqual(casting_result, op(dividend_sparse, 2).to_dense())
@onlyNativeDeviceTypes
@dtypes(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64)
def test_integer_addcdiv_deprecated(self, device, dtype):
t = torch.tensor(1, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, '^Integer division.+is no longer supported.+'):
torch.addcdiv(t, t, t)
with self.assertRaisesRegex(RuntimeError, '^Integer division.+is no longer supported.+'):
torch.addcdiv(t, t, t, out=t)
with self.assertRaisesRegex(RuntimeError, '^Integer division.+is no longer supported+'):
t.addcdiv_(t, t)
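# Since integer addcdiv is rejected outright, the workaround is an explicit
# cast before the call (editorial sketch; `x` is a stand-in, not the test's
# tensor):
#   >>> x = torch.tensor(1).float()
#   >>> torch.addcdiv(x, x, x)   # 1 + 1 * (1 / 1)
#   tensor(2.)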
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
@float_double_default_dtype
@onlyCPU
@dtypes(*list(itertools.product(set(numpy_to_torch_dtype_dict.values()),
set(numpy_to_torch_dtype_dict.values()))))
def test_numpy_array_binary_ufunc_promotion(self, device, dtypes):
import operator
np_type = torch_to_numpy_dtype_dict[dtypes[0]]
torch_type = dtypes[1]
t = torch.tensor((1,), device=device, dtype=torch_type)
a = np.array((1,), dtype=np_type)
a_as_t = torch.from_numpy(a).to(device=device)
for np_first in (True, False):
for op in (operator.add, torch.add):
# Acquires results of binary ufunc type promotion.
try:
actual = op(a, t) if np_first else op(t, a)
except Exception as e:
actual = e
try:
expected = op(a_as_t, t) if np_first else op(t, a_as_t)
except Exception as e:
expected = e
same_result = (type(expected) == type(actual)) and expected == actual
# Note: An "undesired failure," as opposed to an "expected failure"
# is both expected (we know the test will fail) and
# undesirable (if PyTorch was working properly the test would
# not fail). This test is affected by two issues (see below)
# that will cause undesired failures. It detects when these
# issues will occur and updates this bool accordingly.
undesired_failure = False
# A NumPy array as the first argument to the plus operator
# or as any argument to torch.add is not working as
# intended.
# See https://github.com/pytorch/pytorch/issues/36363.
if np_first and op is operator.add:
undesired_failure = True
if op is torch.add:
undesired_failure = True
# Expects the same result if undesired_failure is false
# and a different result otherwise.
# Note: These cases prettyprint the failing inputs to make
# debugging test failures easier.
if undesired_failure and same_result:
msg = ("Failure: {0} == {1}. "
"torch type was {2}. NumPy type was {3}. np_first is {4} "
"default type is {5}.").format(actual, expected,
torch_type, np_type,
np_first,
torch.get_default_dtype())
self.fail(msg)
if not undesired_failure and not same_result:
msg = ("Failure: {0} != {1}. "
"torch type was {2}. NumPy type was {3}. np_first is {4} "
"default type is {5}.").format(actual, expected,
torch_type, np_type,
np_first,
torch.get_default_dtype())
self.fail(msg)
@onlyNativeDeviceTypes
def test_cat_different_dtypes(self, device):
dtypes = all_types_and_complex_and(torch.half, torch.bool)
for x_dtype, y_dtype in itertools.product(dtypes, dtypes):
x_vals, y_vals = [1, 2, 3], [4, 5, 6]
x = torch.tensor(x_vals, device=device, dtype=x_dtype)
y = torch.tensor(y_vals, device=device, dtype=y_dtype)
if x_dtype is torch.bool:
x_vals = [1, 1, 1]
if y_dtype is torch.bool:
y_vals = [1, 1, 1]
res_dtype = torch.result_type(x, y)
expected_res = torch.tensor(x_vals + y_vals, device=device, dtype=res_dtype)
res = torch.cat([x, y])
self.assertEqual(res, expected_res, exact_dtype=True)
# cat: full and an empty tensor.
y = torch.tensor([], device=device, dtype=y_dtype)
res_dtype = torch.result_type(x, y)
expected_res = torch.tensor(x_vals + [], device=device, dtype=res_dtype)
res = torch.cat([x, y])
self.assertEqual(res, expected_res, exact_dtype=True)
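# torch.cat promotes mixed inputs to their common result type, matching
# torch.result_type on the pair (editorial sketch):
#   >>> torch.cat([torch.tensor([1], dtype=torch.int32),
#   ...            torch.tensor([1.], dtype=torch.float64)]).dtype
#   torch.float64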
@onlyNativeDeviceTypes
def test_cat_out_different_dtypes(self, device):
dtypes = all_types_and_complex_and(torch.half)
for x_dtype, y_dtype, out_dtype in itertools.product(dtypes, dtypes, dtypes):
out = torch.zeros(6, device=device, dtype=out_dtype)
x = torch.tensor([1, 2, 3], device=device, dtype=x_dtype)
y = torch.tensor([4, 5, 6], device=device, dtype=y_dtype)
expected_out = torch.tensor([1, 2, 3, 4, 5, 6], device=device, dtype=out_dtype)
if (((x_dtype.is_floating_point or y_dtype.is_floating_point)
and not (out_dtype.is_floating_point or out_dtype.is_complex))
or ((x_dtype.is_complex or y_dtype.is_complex) and not out_dtype.is_complex)):
# These combinations do not support type conversion to an out tensor of a different type class
with self.assertRaises(RuntimeError):
torch.cat([x, y], out=out)
else:
torch.cat([x, y], out=out)
self.assertEqual(out, expected_out, exact_dtype=True)
# Verifies that unary ops require matching out types
@onlyNativeDeviceTypes
@dtypes(*itertools.product((torch.int64,
torch.float32, torch.float64,
torch.complex64, torch.complex128),
(torch.int64,
torch.float32, torch.float64,
torch.complex64, torch.complex128)))
def test_unary_op_out_casting(self, device, dtypes):
t = torch.tensor((1), dtype=dtypes[0], device=device)
out = torch.empty(0, dtype=dtypes[1], device=device)
ops = (torch.neg, torch.floor, torch.ceil)
float_and_int_only_ops = {torch.floor, torch.ceil}
real_only_ops = {torch.floor, torch.ceil}
for op in ops:
if dtypes[0] is not dtypes[1]:
with self.assertRaises(RuntimeError):
op(t, out=out)
elif op in real_only_ops and dtypes[0].is_complex:
with self.assertRaises(RuntimeError):
op(t, out=out)
elif (
op in float_and_int_only_ops
and (not dtypes[0].is_floating_point and not dtypes[0].is_complex)
and (not (dtypes[0] == torch.int64 and dtypes[1] == torch.int64))
and device != "meta"
):
with self.assertRaises(RuntimeError):
op(t, out=out)
else:
self.assertEqual(op(t, out=out), op(t))
self.assertEqual(op(t, out=out), out)
# Verifies that the out= argument doesn't affect the computation, that
# is, out = op(...) and op(..., out=out) produce the same result.
@onlyNativeDeviceTypes
@skipMeta
def test_computation_ignores_out(self, device):
t = torch.tensor(33000, dtype=torch.float16, device=device)
out = torch.empty(0, dtype=torch.float64, device=device)
result = torch.add(t, t, out=out)
self.assertEqual(result, t + t, exact_dtype=False)
self.assertNotEqual(result, t.double() + t, exact_dtype=False)
a = torch.tensor(1.5, dtype=torch.float16, device=device)
b = torch.tensor(.666, dtype=torch.float16, device=device)
result = torch.true_divide(a, b, out=out)
self.assertEqual(result, a / b, exact_dtype=False)
self.assertNotEqual(result, a.double() / b, exact_dtype=False)
a = torch.tensor(5, dtype=torch.uint8, device=device)
b = torch.tensor(8, dtype=torch.uint8, device=device)
result = torch.sub(a, b, out=out)
self.assertEqual(result, a - b, exact_dtype=False)
self.assertNotEqual(result, a.double() - b, exact_dtype=False)
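# In other words, out= only controls the final cast; it never upgrades the
# precision of the computation itself. That is why t=33000 is chosen above:
# it overflows in float16 even though float64 could hold the sum
# (editorial sketch):
#   >>> torch.tensor(33000., dtype=torch.float16) * 2
#   tensor(inf, dtype=torch.float16)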
@onlyNativeDeviceTypes
@dtypes(*itertools.product((torch.bool, torch.int, torch.float, torch.double), repeat=3))
def test_clamp_type_promotion(self, device, dtypes):
dtype0, dtype1, dtype2 = dtypes
S = 4
def make_tensor(size, dtype):
if dtype == torch.bool:
return torch.randint(2, size, dtype=dtype, device=device)
elif dtype == torch.int:
return torch.randint(10, size, dtype=dtype, device=device)
else:
return torch.randn(size, dtype=dtype, device=device)
min_t = make_tensor((S,), dtype1)
max_t = make_tensor((S,), dtype2)
mins = (min_t, min_t[0], min_t[0].item())
maxs = (max_t, max_t[0], max_t[0].item())
inp = make_tensor((S,), dtype0)
for min_v, max_v in itertools.product(mins, maxs):
if type(max_v) != type(min_v):
continue
if isinstance(min_v, torch.Tensor) and min_v.ndim == 0 and max_v.ndim == 0:
continue # 0d tensors go to scalar overload, and it's tested separately
def expected_type(inp, max, min):
arg1, arg2 = max, min
if isinstance(max, torch.Tensor) and max.ndim == 0:
# promote against the possibly multi-dim arg first; handle the 0-dim tensor last
arg1, arg2 = min, max
exp_type = torch.result_type(inp, arg1)
inp_new = torch.empty_like(inp, dtype=exp_type)
return torch.result_type(inp_new, arg2)
exp_type = expected_type(inp, min_v, max_v)
if exp_type != torch.bool:
actual = torch.clamp(inp, min_v, max_v)
inps = list(map(lambda x: x.to(exp_type) if isinstance(x, torch.Tensor) else x,
(inp, min_v, max_v)))
expected = torch.clamp(inps[0], inps[1], inps[2])
self.assertEqual(actual, expected)
if inp.dtype in floating_types() or exp_type == inp.dtype:
actual = torch.clamp_(inp, min_v, max_v)
self.assertEqual(actual, expected, exact_dtype=False)
for val in mins:
def expected_type(inp, val):
return torch.result_type(inp, val)
exp_type = expected_type(inp, val)
if exp_type != torch.bool:
actual = torch.clamp_min(inp, val)
inps = list(map(lambda x: x.to(exp_type) if isinstance(x, torch.Tensor) else x,
(inp, val)))
expected = torch.clamp_min(inps[0], inps[1])
self.assertEqual(actual.dtype, exp_type)
self.assertEqual(actual, expected)
if inp.dtype == exp_type:
actual = torch.clamp_min_(inp, val)
self.assertEqual(actual, expected)
actual = torch.clamp_max(inp, val)
expected = torch.clamp_max(inps[0], inps[1])
self.assertEqual(actual, expected)
if inp.dtype in floating_types() or exp_type == inp.dtype:
actual = torch.clamp_max_(inp, val)
self.assertEqual(actual, expected, exact_dtype=False)
instantiate_device_type_tests(TestTypePromotion, globals())
if __name__ == '__main__':
run_tests()
|
def float_double_default_dtype(fn):
@wraps(fn)
def wrapped_fn(*args, **kwargs):
with set_default_dtype(torch.float):
fn(*args, **kwargs)
with set_default_dtype(torch.double):
fn(*args, **kwargs)
return wrapped_fn
class TestTypePromotion(TestCase):
# In-place operations don't promote.
# `int+float -> float` but `int.add_(float)` is rejected as an error.
# Promoting inplace would require re-allocating and copying the memory of the
# tensor data, since element size could change.
# https://github.com/pytorch/pytorch/issues/127049
@xfailIfTorchDynamo
@float_double_default_dtype
def test_inplace(self, device):
int_tensor = torch.ones([4, 4, 4], dtype=torch.int32, device=device)
self.assertRaisesRegex(RuntimeError, "can't be cast to", lambda: int_tensor.add_(1.5))
expected = torch.ones([4, 4, 4], dtype=torch.int32, device=device)
long_tensor = torch.ones([4, 4, 4], dtype=torch.int64, device=device)
int_tensor.add_(long_tensor)
int_tensor.add_(1)
three = expected + 2
self.assertEqual(int_tensor, three)
self.assertEqual(int_tensor.dtype, torch.int32)
bool_tensor = torch.tensor([1, 1, 1], dtype=torch.bool, device=device)
uint8_tensor = torch.tensor([1, 1, 1], dtype=torch.uint8, device=device)
# We treat bool as a separate category, which means uint8 cannot cast to bool.
self.assertRaisesRegex(RuntimeError, "can't be cast to", lambda: bool_tensor.add_(uint8_tensor))
# We allow demotion from signed to unsigned, unlike numpy, because:
# * We don't want the performance penalty of inspecting scalar values.
# * We don't want 'signed' to be considered a distinct 'category'
# in promotion rules.
# We don't want signed to be a separate category because if it was,
# uint16_tensor + 5 would result in a long_tensor, which is not what we want.
int16_tensor = torch.tensor([1, 1, 1], dtype=torch.int16, device=device)
uint8_tensor *= int16_tensor
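# The flip side of "in-place ops don't promote": when the cast is allowed,
# the right operand is silently converted and the left operand keeps its
# dtype (editorial sketch):
#   >>> t = torch.ones(2, dtype=torch.int32)
#   >>> t.add_(torch.ones(2, dtype=torch.int64)).dtype
#   torch.int32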
@float_double_default_dtype
def test_unsigned(self, device):
dont_promote = torch.ones(3, dtype=torch.uint8, device=device) + 5
self.assertEqual(dont_promote.dtype, torch.uint8)
# some basic examples
@float_double_default_dtype
def test_int_promotion(self, device):
a = torch.ones([4, 4, 4], dtype=torch.int32, device=device)
b = torch.ones([4, 4, 4], dtype=torch.int64, device=device)
c = a + b
self.assertEqual(c, b + b)
self.assertEqual(c.dtype, torch.int64)
@float_double_default_dtype
def test_float_promotion(self, device):
def test_promotion(dtype_float, dtype_double):
a = torch.ones([4, 4, 4], dtype=dtype_float, device=device)
b = torch.ones([4, 4, 4], dtype=dtype_double, device=device)
c = a + b
self.assertEqual(c, b + b)
self.assertEqual(c.dtype, dtype_double)
c = b + a
self.assertEqual(c, b + b)
self.assertEqual(c.dtype, dtype_double)
test_promotion(torch.float, torch.double)
@float_double_default_dtype
def test_complex_promotion(self, device):
def test_promotion(dtype_float, dtype_double):
a = torch.ones([4, 4, 4], dtype=dtype_float, device=device)
b = torch.ones([4, 4, 4], dtype=dtype_double, device=device)
c = a + b
self.assertEqual(c, b + b)
self.assertEqual(c.dtype, dtype_double)
c = b + a
self.assertEqual(c, b + b)
self.assertEqual(c.dtype, dtype_double)
test_promotion(torch.complex64, torch.complex128)
a = torch.randn(3, dtype=torch.complex64, device=device)
self.assertEqual((a * 5).dtype, torch.complex64)
# not a "wrapped number"
other = torch.tensor(5.5, dtype=torch.double, device=device)
self.assertEqual((a + other).dtype, torch.complex64)
def make_scalar_tensor(dtype):
return make_tensor((), dtype=dtype, device=device)
def make_1d_tensor(dtype):
return make_tensor((3,), dtype=dtype, device=device)
def complex_scalar_tensor_test(s, t):
# As per type promotion rules,
# Complex Scalar and Float Tensor -> Complex Tensor with Value type of Float Tensor
# Complex Scalar and Integral Tensor -> Complex Tensor with Value type of Complex Scalar
if t.dtype.is_floating_point:
# defaults to return complex64 (for bfloat16)
expected_dtype = float_to_corresponding_complex_type_map.get(t.dtype, torch.complex64)
else: # integral tensor
if isinstance(s, torch.Tensor):
expected_dtype = s.dtype
else:
expected_dtype = float_to_corresponding_complex_type_map[torch.get_default_dtype()]
self.assertEqual((s * t).dtype, expected_dtype)
self.assertEqual((t * s).dtype, expected_dtype)
self.assertEqual(torch.result_type(s, t), expected_dtype)
self.assertEqual(torch.result_type(t, s), expected_dtype)
if torch.device(device).type != 'xla':
# chalf is not supported on XLA
s = make_scalar_tensor(dtype=torch.chalf)
# Same Value type
t = make_1d_tensor(dtype=torch.half)
# 0-D Tensor X 1-D Tensor
complex_scalar_tensor_test(s, t)
# Python Scalar X 1-D Tensor
complex_scalar_tensor_test(s.item(), t)
# Higher Value Type
t = make_1d_tensor(dtype=torch.float)
complex_scalar_tensor_test(s, t)
complex_scalar_tensor_test(s.item(), t)
# Special Case
t = make_1d_tensor(dtype=torch.bfloat16)
complex_scalar_tensor_test(s, t)
complex_scalar_tensor_test(s.item(), t)
# Integral Tensor
t = make_1d_tensor(dtype=torch.long)
complex_scalar_tensor_test(s, t)
complex_scalar_tensor_test(s.item(), t)
# CFloat Scalar
s = make_scalar_tensor(dtype=torch.cfloat)
# Lower Value type than CFloat
t = make_1d_tensor(dtype=torch.half)
complex_scalar_tensor_test(s, t)
complex_scalar_tensor_test(s.item(), t)
# Higher Value type than CFloat
t = make_1d_tensor(dtype=torch.double)
complex_scalar_tensor_test(s, t)
complex_scalar_tensor_test(s.item(), t)
# Integral Tensor
t = make_1d_tensor(dtype=torch.long)
# 0-D Tensor X 1-D Tensor
complex_scalar_tensor_test(s, t)
# Python Scalar X 1-D Tensor
complex_scalar_tensor_test(s.item(), t)
# CDouble Scalar
s = make_scalar_tensor(dtype=torch.cdouble)
# Lower Value type than CDouble
t = make_1d_tensor(dtype=torch.float)
complex_scalar_tensor_test(s, t)
complex_scalar_tensor_test(s.item(), t)
# Special Case
t = make_1d_tensor(dtype=torch.bfloat16)
complex_scalar_tensor_test(s, t)
complex_scalar_tensor_test(s.item(), t)
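# A one-line distillation of complex_scalar_tensor_test (editorial sketch):
# a Python complex scalar adopts the value type of the floating tensor it
# meets, with bfloat16 falling back to complex64:
#   >>> (1j * torch.ones(2, dtype=torch.float64)).dtype
#   torch.complex128
#   >>> (1j * torch.ones(2, dtype=torch.bfloat16)).dtype
#   torch.complex64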
@float_double_default_dtype
def test_complex_scalar_mult_tensor_promotion(self, device):
a = 1j * torch.ones(2, device=device)
a = a + 1j
b = torch.tensor([2j, 2j], device=device)
self.assertEqual(a, b)
self.assertEqual(a.dtype, b.dtype)
@float_double_default_dtype
def test_add_wrapped(self, device):
a = torch.ones([4, 4, 4], dtype=torch.int, device=device)
b = 1
c = a + b
self.assertEqual(c, a + a)
self.assertEqual(c.dtype, torch.int)
@float_double_default_dtype
def test_int_to_float(self, device):
a = torch.ones([4, 4, 4], dtype=torch.int32, device=device)
b = torch.ones([4, 4, 4], dtype=torch.float, device=device)
c = a + b
self.assertEqual(c.dtype, torch.float32)
# some examples from:
# https://github.com/pytorch/pytorch/issues/9515
@float_double_default_dtype
def test_from_issue(self, device):
a = torch.rand(3, dtype=torch.float32, device=device)
u = torch.tensor([0, 0, 1], dtype=torch.uint8, device=device)
self.assertEqual((a * 5).dtype, torch.float32)
self.assertEqual((u + 1).dtype, torch.uint8)
self.assertEqual((u + 1000).dtype, torch.uint8) # integer overflow
# not a "wrapped number"
other = torch.tensor(5.5, dtype=torch.double, device=device)
self.assertEqual((u + 5.5).dtype, torch.get_default_dtype())
self.assertEqual((u + other).dtype, torch.double)
# adding a 0-dim tensor to a float doesn't promote to double unless first
# type was integral.
self.assertEqual((a + other).dtype, torch.float32)
@float_double_default_dtype
def test_half(self, device):
half = torch.tensor(5.5, dtype=torch.float16, device=device)
self.assertEqual((half + 2.2).dtype, torch.float16)
self.assertEqual((half + 100000).dtype, torch.float16) # inf
default_tensor = torch.tensor(100000.0, device=device)
self.assertEqual((half + default_tensor).dtype, torch.get_default_dtype())
def test_bfloat16(self, device):
# with scalar
bf = torch.tensor(5.5, dtype=torch.bfloat16, device=device)
for scalar in (2.2, 5, 100000): # bf + 100000 is inf
self.assertEqual((bf + scalar).dtype, torch.bfloat16)
self.assertEqual(scalar + bf, bf + scalar)
for scalar in (complex(1, 1), complex(-2, 0), complex(0, -3)):
self.assertEqual((bf + scalar).dtype, torch.cfloat)
self.assertEqual(bf + scalar, scalar + bf)
# with tensor
for dtype in all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool):
t = torch.tensor(1, dtype=dtype, device=device)
self.assertEqual(bf + t, t + bf)
if dtype in (torch.float16, torch.float32, torch.float64, torch.cfloat, torch.cdouble):
# Handles bfloat16 x float16 -> float32 promotion
expected_dtype = dtype if dtype != torch.half else torch.float32
elif dtype is torch.chalf:
expected_dtype = torch.cfloat
elif dtype in (torch.bool, torch.uint8,
torch.int8, torch.int16, torch.int32, torch.int64, torch.bfloat16):
expected_dtype = torch.bfloat16
else:
raise AssertionError(f'dtype {dtype} not tested.')
self.assertEqual(torch.promote_types(dtype, torch.bfloat16), expected_dtype)
self.assertEqual(torch.promote_types(torch.bfloat16, dtype), expected_dtype)
self.assertEqual((bf + t).dtype, expected_dtype)
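# The surprising row in this table: bfloat16 and float16 have no common
# 16-bit supertype, so they meet at float32 (editorial sketch):
#   >>> torch.promote_types(torch.bfloat16, torch.float16)
#   torch.float32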
@onlyNativeDeviceTypes
def test_complex_half(self, device):
# with scalar
chalf = torch.tensor(5.5, dtype=torch.chalf, device=device)
for scalar in (2.2, 5, 100000): # chalf + 100000 is inf
self.assertEqual((chalf * scalar).dtype, torch.chalf)
self.assertEqual(scalar * chalf, chalf * scalar)
for scalar in (complex(1, 1), complex(-2, 0), complex(0, -3)):
self.assertEqual((chalf * scalar).dtype, torch.chalf)
self.assertEqual(chalf * scalar, scalar * chalf)
# with tensor
dtypes = all_types_and_complex_and(torch.chalf, torch.half, torch.bfloat16, torch.bool)
for dtype in dtypes:
t = torch.tensor(1, dtype=dtype, device=device)
self.assertEqual(chalf * t, t * chalf)
if dtype in (torch.float16, torch.chalf):
expected_dtype = torch.chalf
elif dtype in (torch.float, torch.double, torch.bfloat16):
expected_dtype = torch.cdouble if dtype is torch.double else torch.cfloat
elif dtype in (torch.cfloat, torch.cdouble):
expected_dtype = dtype
elif dtype in (torch.bool, torch.uint8,
torch.int8, torch.int16, torch.int32, torch.int64):
expected_dtype = torch.chalf
else:
raise AssertionError(f'dtype {dtype} not tested.')
self.assertEqual(torch.promote_types(dtype, torch.chalf), expected_dtype)
self.assertEqual(torch.promote_types(torch.chalf, dtype), expected_dtype)
self.assertEqual((chalf * t).dtype, expected_dtype)
@float_double_default_dtype
def test_alternate_result(self, device):
x = torch.tensor([1, 1, 1, 1], dtype=torch.float, device=device)
o = torch.tensor([0, 0, 0, 0], dtype=torch.long, device=device)
self.assertRaisesRegex(RuntimeError,
"can't be cast to",
lambda: torch.add(x, x, out=o))
d = torch.tensor([1, 1, 1, 1], dtype=torch.double, device=device)
torch.add(x, x, out=d)
self.assertEqual(d.dtype, torch.double)
x = x.to(torch.double)
self.assertEqual(x + x, d)
@float_double_default_dtype
def test_mixed_type_backward(self, device):
f = torch.ones([3, 3], dtype=torch.float, requires_grad=True, device=device)
ten = torch.tensor([10.], dtype=torch.double, device=device)
tens = f * ten
s = (tens + 2).sum()
s.backward()
expected = f.grad.to(torch.double)
self.assertEqual(tens, expected)
# If we don't convert the returned grad_input to the actual input type
# we get an error like:
# RuntimeError: Function SubBackward0 returned an invalid gradient at index 0 - expected type \
# torch.FloatTensor but got torch.DoubleTensor
f_dtypes = [torch.float, torch.double]
if self.device_type == 'cuda':
f_dtypes = f_dtypes + [torch.half]
i_dtypes = [torch.int, torch.long]
for func in [torch.add, torch.sub, torch.rsub, torch.mul, torch.div]:
for dtype1, dtype2 in itertools.product(f_dtypes, f_dtypes + i_dtypes):
x = torch.ones(10, requires_grad=True, dtype=dtype1, device=device)
y = torch.ones(10, dtype=dtype2, device=device)
func(x, y).sum().backward()
def _get_test_tensor(self, device, dtype, remove_zeros=False):
shape = [5, 5, 5]
if dtype == torch.bool:
tensor = torch.randint(int(remove_zeros), 2, shape, device=device, dtype=dtype)
elif dtype.is_floating_point or dtype.is_complex:
# "_th_normal_ not supported on CPUType for Half" so simpler create and convert
tensor = torch.randn(shape, device=device)
tensor = tensor.to(dtype)
if remove_zeros:
tensor[torch.abs(tensor) < 0.05] = 5
else:
tensor = torch.randint(-5 if dtype.is_signed else 0, 10, shape, device=device, dtype=dtype)
if remove_zeros:
tensor[tensor == 0] = 5
return tensor
# verifies that torch.<op>(first, second) is the same as
# torch.<op>(first.to(common_dtype), second.to(common_dtype)) in cases where that should hold.
@float_double_default_dtype
def test_many_promotions(self, device):
# Can also include half on CPU in cases where it will be promoted to a
# supported dtype
dtypes1 = get_all_math_dtypes('cuda')
dtypes2 = get_all_math_dtypes(device)
ops = [torch.add, torch.sub, torch.mul, torch.div, torch.rsub]
for dt1, dt2 in itertools.product(dtypes1, dtypes2):
for op, non_contiguous in itertools.product(ops, [True, False]):
common_dtype = torch.promote_types(dt1, dt2)
if common_dtype == torch.half and self.device_type == 'cpu':
continue
if op == torch.sub and common_dtype == torch.bool:
# Subtraction, the `-` operator, with a bool tensor is not supported.
continue
first = self._get_test_tensor(device, dt1)
second = self._get_test_tensor(device, dt2, op == torch.div)
# test ops with non-contiguous tensors
if non_contiguous:
first = first.transpose(0, 2)
second = second.transpose(2, 1)
self.assertNotEqual(first.stride(), second.stride(),
msg="some non-contiguous issues could be missed if tensors have same strides")
self.assertEqual(not first.is_contiguous(), non_contiguous)
self.assertEqual(not second.is_contiguous(), non_contiguous)
result = op(first, second)
expected = op(first.to(common_dtype), second.to(common_dtype))
self.assertEqual(result.dtype, expected.dtype, msg=f'{op.__name__} with {dt1}, {dt2}')
self.assertEqual(result, expected, msg=f'{op.__name__} with {dt1}, {dt2}')
@float_double_default_dtype
def test_non_promoting_ops(self, device):
x = torch.ones(4, dtype=torch.double, device=device)
with self.assertRaises(RuntimeError):
torch.lerp(x, torch.ones(4, dtype=torch.float, device=device), 1)
@float_double_default_dtype
def test_alpha_mismatch(self, device):
x = torch.ones(4, dtype=torch.int, device=device)
err = 'alpha must not be'
self.assertRaisesRegex(RuntimeError, err,
lambda: torch.add(x, x, alpha=1.1))
x = x.to(torch.bool)
self.assertRaisesRegex(RuntimeError, err,
lambda: torch.add(x, x, alpha=1.1))
self.assertEqual(x + x, torch.add(x, x, alpha=True))
@float_double_default_dtype
def test_booleans(self, device):
onedim = torch.tensor([True], device=device)
self.assertEqual(onedim + onedim, onedim)
self.assertEqual(onedim + True, onedim)
self.assertEqual(torch.add(True, True), True)
self.assertEqual(torch.add(False, False), False)
self.assertEqual(torch.add(False, True), True)
self.assertRaisesRegex(RuntimeError, "Boolean alpha only supported",
lambda: torch.add(1, 1, alpha=True))
self.assertEqual(torch.add(torch.tensor(True, device=device),
torch.tensor(True, device=device), True),
torch.tensor(True, device=device))
@skipIfTorchDynamo("Not a TorchDynamo suitable test")
@float_double_default_dtype
def test_create_bool_tensors(self, device):
expected = torch.tensor([0], dtype=torch.int64, device=device)
self.assertEqual(torch.arange(False, True, device=device), expected)
self.assertEqual(torch.arange(True, device=device), expected)
expected = torch.tensor([0, 0.5], dtype=torch.get_default_dtype(), device=device)
self.assertEqual(torch.arange(False, True, 0.5, device=device), expected)
expected = torch.ones(0, dtype=torch.int64, device=device)
self.assertEqual(torch.arange(False, False, device=device), expected)
bool_tensor_lin = torch.linspace(False, True, steps=100, device=device)
int_tensor_lin = torch.linspace(0, 1, steps=100, device=device)
self.assertEqual(bool_tensor_lin, int_tensor_lin)
bool_tensor_log = torch.logspace(False, True, steps=100, device=device)
int_tensor_log = torch.logspace(0, 1, steps=100, device=device)
self.assertEqual(bool_tensor_log, int_tensor_log)
# This seems like odd behavior, but ints also create float tensors; numpy doesn't have this function.
self.assertEqual(torch.scalar_tensor(False, device=device), torch.tensor(0., device=device))
@dtypes(*itertools.product(all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool),
all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool)))
def test_result_type(self, device, dtypes):
"Test result_type for tensor vs tensor and scalar vs scalar."
def _get_dtype(x):
"Get the dtype of x if x is a tensor. If x is a scalar, get its corresponding dtype if it were a tensor."
if torch.is_tensor(x):
return x.dtype
elif isinstance(x, bool):
return torch.bool
elif isinstance(x, int):
return torch.int64
elif isinstance(x, float):
return torch.float32
elif isinstance(x, complex):
return torch.complex64
else:
raise AssertionError(f"Unknown type {x}")
# tensor against tensor
a_tensor = torch.tensor((0, 1), device=device, dtype=dtypes[0])
a_single_tensor = torch.tensor(1, device=device, dtype=dtypes[0])
a_scalar = a_single_tensor.item()
b_tensor = torch.tensor((1, 0), device=device, dtype=dtypes[1])
b_single_tensor = torch.tensor(1, device=device, dtype=dtypes[1])
b_scalar = b_single_tensor.item()
combo = ((a_tensor, a_single_tensor, a_scalar), (b_tensor, b_single_tensor, b_scalar))
for a, b in itertools.product(*combo):
dtype_a = _get_dtype(a)
dtype_b = _get_dtype(b)
try:
result = a + b
except RuntimeError:
with self.assertRaises(RuntimeError):
torch.promote_types(dtype_a, dtype_b)
with self.assertRaises(RuntimeError):
torch.result_type(a, b)
else:
dtype_res = _get_dtype(result)
if a is a_scalar and b is b_scalar and dtype_a == torch.bool and dtype_b == torch.bool:
# special case: in Python, True + True is an integer
self.assertEqual(dtype_res, torch.int64, f"a == {a}, b == {b}")
else:
self.assertEqual(dtype_res, torch.result_type(a, b), f"a == {a}, b == {b}")
if a is a_scalar and b is b_scalar: # Python internal type determination is good enough in this case
continue
if any(a is a0 and b is b0 for a0, b0 in zip(*combo)): # a and b belong to the same class
self.assertEqual(dtype_res, torch.promote_types(dtype_a, dtype_b), f"a == {a}, b == {b}")
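# The scalar cases above encode the "wrapped number" rule: a bare Python
# scalar never widens a tensor within the same category, whatever its value
# (editorial sketch):
#   >>> torch.result_type(torch.tensor([1], dtype=torch.uint8), 1000)
#   torch.uint8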
# Spot check some result type for tensor against scalar (including single-element tensor).
@float_double_default_dtype
def test_result_type_tensor_vs_scalar(self, device):
def _test_spot(a, b, res_dtype):
self.assertEqual(torch.result_type(a, b), res_dtype)
self.assertEqual(torch.result_type(b, a), res_dtype)
_test_spot(torch.tensor([1, 2], dtype=torch.half, device=device),
torch.tensor(1, dtype=torch.long, device=device), torch.half)
_test_spot(torch.tensor(1, dtype=torch.float, device=device),
torch.tensor([1, 2], dtype=torch.double, device=device), torch.double)
_test_spot(torch.tensor(1, dtype=torch.int, device=device), 1, torch.int)
_test_spot(torch.tensor(1, device=device), 1., torch.get_default_dtype())
_test_spot(torch.tensor(1, dtype=torch.long, device=device),
torch.tensor([1, 1], dtype=torch.int, device=device), torch.int)
_test_spot(torch.tensor([1., 1.], dtype=torch.float, device=device), 1., torch.float)
_test_spot(torch.tensor([1., 1.], dtype=torch.complex64, device=device),
torch.tensor(1., dtype=torch.complex128, device=device), torch.complex64)
_test_spot(torch.tensor([1., 1.], dtype=torch.complex128, device=device),
torch.tensor(1., dtype=torch.complex64, device=device), torch.complex128)
_test_spot(torch.tensor([1, 1], dtype=torch.bool, device=device), 1., torch.get_default_dtype())
@float_double_default_dtype
def test_can_cast(self, device):
self.assertTrue(torch.can_cast(torch.double, torch.float))
self.assertFalse(torch.can_cast(torch.float, torch.int))
@float_double_default_dtype
def test_comparison_ops_with_type_promotion(self, device):
value_for_type = {
torch.uint8: (1 << 5),
torch.int8: (1 << 5),
torch.int16: (1 << 10),
torch.int32: (1 << 20),
torch.int64: (1 << 35),
torch.float16: (1 << 10),
torch.float32: (1 << 20),
torch.float64: (1 << 35),
torch.complex64: (1 << 20),
torch.complex128: (1 << 35)
}
comparison_ops = [
dict(
name="lt",
out_op=lambda x, y, d: torch.lt(x, y, out=torch.empty(0, dtype=torch.bool, device=d)),
ret_op=lambda x, y: torch.lt(x, y),
compare_op=operator.lt,
),
dict(
name="le",
out_op=lambda x, y, d: torch.le(x, y, out=torch.empty(0, dtype=torch.bool, device=d)),
ret_op=lambda x, y: torch.le(x, y),
compare_op=operator.le,
),
dict(
name="gt",
out_op=lambda x, y, d: torch.gt(x, y, out=torch.empty(0, dtype=torch.bool, device=d)),
ret_op=lambda x, y: torch.gt(x, y),
compare_op=operator.gt,
),
dict(
name="ge",
out_op=lambda x, y, d: torch.ge(x, y, out=torch.empty(0, dtype=torch.bool, device=d)),
ret_op=lambda x, y: torch.ge(x, y),
compare_op=operator.ge,
),
dict(
name="eq",
out_op=lambda x, y, d: torch.eq(x, y, out=torch.empty(0, dtype=torch.bool, device=d)),
ret_op=lambda x, y: torch.eq(x, y),
compare_op=operator.eq,
),
dict(
name="ne",
out_op=lambda x, y, d: torch.ne(x, y, out=torch.empty(0, dtype=torch.bool, device=d)),
ret_op=lambda x, y: torch.ne(x, y),
compare_op=operator.ne,
),
]
for op in comparison_ops:
for dt1 in get_all_math_dtypes(device):
for dt2 in get_all_math_dtypes(device):
if (dt1.is_complex or dt2.is_complex) and not (op["name"] == "eq" or op["name"] == "ne"):
continue
val1 = value_for_type[dt1]
val2 = value_for_type[dt2]
t1 = torch.tensor([val1], dtype=dt1, device=device)
t2 = torch.tensor([val2], dtype=dt2, device=device)
expected = torch.tensor([op["compare_op"](val1, val2)], dtype=torch.bool)
out_res = op["out_op"](t1, t2, device)
self.assertEqual(out_res, expected)
self.assertTrue(out_res.dtype == torch.bool)
self.assertTrue(t1.dtype == dt1)
self.assertTrue(t2.dtype == dt2)
out_res = op["ret_op"](t1, t2)
self.assertEqual(out_res, expected)
self.assertTrue(out_res.dtype == torch.bool)
self.assertTrue(t1.dtype == dt1)
self.assertTrue(t2.dtype == dt2)
# test that comparing a zero dim tensor with another zero dim tensor has type promotion behavior
t1 = torch.tensor(val1, dtype=dt1, device=device)
t2 = torch.tensor(val2, dtype=dt2, device=device)
expected = torch.tensor(op["compare_op"](val1, val2), dtype=torch.bool)
out_res = op["out_op"](t1, t2, device)
self.assertEqual(out_res, expected)
self.assertTrue(out_res.dtype == torch.bool)
self.assertTrue(t1.dtype == dt1)
self.assertTrue(t2.dtype == dt2)
out_res = op["ret_op"](t1, t2)
self.assertEqual(out_res, expected)
self.assertTrue(out_res.dtype == torch.bool)
self.assertTrue(t1.dtype == dt1)
self.assertTrue(t2.dtype == dt2)
# XLA tests fail for self.assertRaises for complex dtypes
@onlyNativeDeviceTypes
def test_complex_assertraises(self, device):
comparison_ops = [
dict(name="lt", compare_op=operator.lt, ),
dict(name="le", compare_op=operator.le, ),
dict(name="gt", compare_op=operator.gt, ),
dict(name="ge", compare_op=operator.ge, ),
dict(name="eq", compare_op=operator.eq, ),
dict(name="ne", compare_op=operator.ne, ),
]
for op in comparison_ops:
is_cuda = torch.device(device).type == 'cuda'
dtypes = get_all_dtypes(include_half=is_cuda,
include_bfloat16=False, include_bool=False,
include_complex32=True)
for dt1, dt2 in itertools.product(dtypes, dtypes):
if (dt1.is_complex or dt2.is_complex) and not (op["name"] == "eq" or op["name"] == "ne"):
u = torch.tensor([1], dtype=dt1, device=device)
v = torch.tensor([2], dtype=dt2, device=device)
self.assertRaises(RuntimeError, lambda: torch.tensor([op["compare_op"](u, v)], dtype=torch.bool))
@float_double_default_dtype
def test_lt_with_type_promotion(self, device):
for dt in get_all_math_dtypes(device):
x = torch.tensor([0], dtype=dt, device=device)
expected = torch.tensor([True], dtype=torch.bool, device=device)
if dt.is_complex:
continue
actual = x < 0.5
self.assertEqual(actual, expected)
self.assertTrue(actual.dtype == torch.bool)
actual = x < torch.tensor(0.5, device=device)
self.assertEqual(actual, expected)
self.assertTrue(actual.dtype == torch.bool)
x = torch.tensor(0, dtype=dt, device=device)
expected = torch.tensor(True, dtype=torch.bool, device=device)
actual = x < 0.5
self.assertEqual(actual, expected)
self.assertTrue(actual.dtype == torch.bool)
actual = x < torch.tensor(0.5, device=device)
self.assertEqual(actual, expected)
self.assertTrue(actual.dtype == torch.bool)
@float_double_default_dtype
def test_promote_types(self, device):
self.assertEqual(torch.promote_types(torch.float, torch.int), torch.float)
self.assertEqual(torch.promote_types(torch.float, torch.double), torch.double)
self.assertEqual(torch.promote_types(torch.int, torch.uint8), torch.int)
with self.assertRaisesRegex(RuntimeError, "Promotion for Float8 Types is not supported"):
self.assertEqual(torch.promote_types(torch.float8_e5m2, torch.float), torch.float)
with self.assertRaisesRegex(RuntimeError, "Promotion for Float8 Types is not supported"):
self.assertEqual(torch.promote_types(torch.float, torch.float8_e4m3fn), torch.float)
@float_double_default_dtype
def test_promote_self(self, device):
for dtype in all_types_and_complex_and(torch.half, torch.bfloat16, torch.chalf, torch.bool,
torch.float8_e5m2, torch.float8_e4m3fn):
self.assertEqual(torch.promote_types(dtype, dtype), dtype)
@expectedFailureMeta
@float_double_default_dtype
def test_indexing_fail(self, device):
# https://github.com/pytorch/pytorch/issues/28010
a = torch.ones(5, 2, dtype=torch.double, device=device)
b = torch.zeros(5, dtype=torch.int, device=device)
with self.assertRaises(RuntimeError):
a[:, [1]] = b.unsqueeze(-1)
@float_double_default_dtype
def test_indexing(self, device):
x = torch.ones(5, 2, dtype=torch.double, device=device)
y = torch.zeros(5, dtype=torch.double, device=device)
x[:, [1]] = y.unsqueeze(-1)
expected = torch.tensor([(1, 0), (1, 0), (1, 0), (1, 0), (1, 0)], dtype=torch.double, device=device)
self.assertEqual(x, expected)
# https://github.com/pytorch/pytorch/issues/27824
tmp = torch.ones(9, 9, dtype=torch.float, device=device)
mask = torch.ones(10, 10, dtype=torch.uint8, device=device)
result = tmp + mask[1:, 1:]
expected = torch.full([9, 9], 2., dtype=torch.float, device=device)
self.assertEqual(result, expected)
@float_double_default_dtype
def test_transpose(self, device):
# https://github.com/pytorch/pytorch/issues/28502
a = torch.tensor([[True, True], [False, True]], device=device)
self.assertEqual(a.t() == 0, a.t() == False) # noqa: E712
@dtypes(torch.bool, torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
@float_double_default_dtype
def test_div_promotion(self, device, dtype):
for op in (torch.div, torch.true_divide):
dividend = (torch.randn(5, device=device) * 100).to(dtype)
divisor = torch.arange(1, 6, device=device).to(dtype)
# Tests tensor/tensor division
casting_result = dividend.to(torch.get_default_dtype()) / divisor.to(torch.get_default_dtype())
self.assertEqual(casting_result, op(dividend, divisor))
# Tests tensor/scalar division
casting_result = dividend.to(torch.get_default_dtype()) / 2
self.assertEqual(casting_result, op(dividend, 2.))
@onlyNativeDeviceTypes
@dtypes(torch.float, torch.double,
torch.bool, torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
def test_div_promotion_out(self, device, dtype):
for op in (torch.div, torch.true_divide):
dividend = (torch.randn(5, device=device) * 100).to(dtype)
divisor = torch.arange(1, 6, device=device).to(dtype)
# Tests that requests for an integer quotient fail
if not dtype.is_floating_point:
integral_quotient = torch.empty(5, device=device, dtype=dtype)
with self.assertRaises(RuntimeError):
op(dividend, divisor, out=integral_quotient)
with self.assertRaises(RuntimeError):
op(dividend, 2, out=integral_quotient)
else:
# Tests that requests for a floating quotient succeed
floating_quotient = torch.empty(5, device=device, dtype=dtype)
div_result = dividend / divisor
self.assertEqual(div_result,
op(dividend, divisor, out=floating_quotient))
self.assertEqual(dividend / 2,
op(dividend, 2, out=floating_quotient))
@dtypes(torch.float, torch.double,
torch.bool, torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
def test_div_promotion_inplace(self, device, dtype):
for op in (torch.Tensor.div_, torch.Tensor.true_divide_):
dividend = (torch.randn(5, device=device) * 100).to(dtype)
divisor = torch.arange(1, 6, device=device).to(dtype)
# Tests that requests for an integer quotient fail
if not dtype.is_floating_point:
with self.assertRaises(RuntimeError):
op(dividend, divisor)
with self.assertRaises(RuntimeError):
op(dividend, 2)
else:
# Tests that requests for a floating quotient succeed
div_result = dividend.clone().div_(divisor)
self.assertEqual(div_result, op(dividend.clone(), divisor))
self.assertEqual(dividend.clone().div_(2), op(dividend.clone(), 2))
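# Concretely, the integral branch fails because the true quotient's dtype
# (a float) can't be cast back into the integral operand (editorial sketch):
#   >>> torch.can_cast(torch.float, torch.long)
#   False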
def _test_sparse_op_input_tensors(self, device, dtype, coalesced, zeros=True):
t = self._get_test_tensor(device, dtype, not zeros)
if zeros and dtype != torch.bool:
# ensure sparsity. Bool should already have sufficient sparsity.
mask = self._get_test_tensor(device, torch.bool)
t = t * mask
if coalesced:
s = t.to_sparse()
else:
s = t.to_sparse()
indices = torch.cat((s.indices(), s.indices()), 1)
values = torch.cat((s.values(), s.values()), 0)
s = torch.sparse_coo_tensor(indices=indices, values=values, size=s.size(), dtype=dtype, device=device)
t = s.to_dense()
self.assertEqual(s.is_coalesced(), coalesced)
self.assertEqual(s.dtype, dtype)
self.assertEqual(t.dtype, s.dtype)
return t, s
def _get_precision(self, dtype, coalesced):
if dtype == torch.half and not coalesced:
# very low precision for uncoalesced float16 sparse tensors since
# ops like (s1 + s2).to_dense() will add four low-precision
# floating point values.
return 5e-2
if dtype == torch.half:
return 1e-3
# uses default
return None
def _test_sparse_op(self, op_name, inplace, dtype1, dtype2, device, coalesced):
if dtype1.is_complex or dtype2.is_complex:
return
suffix = '_' if inplace else ''
err = f"{' coalesced' if coalesced else 'uncoalesced'} {op_name + suffix}({dtype1}, {dtype2})"
def op(t1, t2, suf=None):
suf = suffix if suf is None else suf
return getattr(t1, op_name + suf)(t2)
add_sub = op_name == 'add' or op_name == 'sub'
(dense1, sparse1) = self._test_sparse_op_input_tensors(device, dtype1, coalesced)
(dense2, sparse2) = self._test_sparse_op_input_tensors(device, dtype2, coalesced, op_name != 'div')
common_dtype = torch.result_type(dense1, dense2)
if self.device_type == 'cpu' and common_dtype == torch.half:
self.assertRaises(RuntimeError, lambda: op(sparse1, dense2))
# Skip inplace tests that would fail due to inability to cast to the output type.
# Some of these would also raise errors due to not being a supported op.
if inplace and not torch.can_cast(common_dtype, dtype1):
self.assertRaises(RuntimeError, lambda: op(dense1, sparse2))
self.assertRaises(RuntimeError, lambda: op(sparse1, sparse2))
self.assertRaises(RuntimeError, lambda: op(sparse1, dense2))
return
expected = op(dense1.clone(), dense2)
precision = self._get_precision(expected.dtype, coalesced)
rtol = None if precision is None else 0
test_tensors = [expected, dense1, sparse1, dense2, sparse2]
e, d1, s1, d2, s2 = [x.clone() for x in test_tensors] if inplace else test_tensors
# Test op(sparse, sparse)
if op_name != 'div':
sparse = op(s1, s2)
self.assertEqual(sparse.dtype, e.dtype)
self.assertEqual(e, sparse.to_dense(), atol=precision, rtol=rtol, msg=err)
else:
# sparse division only supports division by a scalar
self.assertRaises(RuntimeError, lambda: op(s1, s2).to_dense())
# Test op(dense, sparse)
if add_sub or op_name == 'mul':
if inplace:
e, d1, s1, d2, s2 = (x.clone() for x in test_tensors)
dense_sparse = op(d1, s2)
dense_sparse = dense_sparse.to_dense() if dense_sparse.is_sparse else dense_sparse
self.assertEqual(e, dense_sparse, atol=precision, rtol=rtol, msg=err)
else:
# sparse division only supports division by a scalar
# mul: Didn't find kernel to dispatch to for operator 'aten::_nnz'
self.assertRaises(RuntimeError, lambda: op(d1, s2))
# Test op(sparse, dense) not supported for all ops but 'mul'.
# add(sparse, dense) is not supported. Use add(dense, sparse) instead.
# sparse division only supports division by a scalar
if op_name != 'mul':
self.assertRaises(RuntimeError, lambda: op(s1, d2))
else:
# No type promotions for inplace operations, hence suf=''
op(s1, d2, suf='')
# Test op(sparse, scalar)
if not add_sub and not (self.device_type == 'cpu' and dtype1 == torch.half):
if inplace:
e, d1, s1, d2, s2 = (x.clone() for x in test_tensors)
scalar = d2.view(d2.numel())[0].item()
sparse = op(s1, scalar)
dense_scalar = op(d1, scalar)
self.assertEqual(sparse.dtype, dense_scalar.dtype)
self.assertEqual(dense_scalar, sparse.to_dense(), atol=precision, rtol=rtol, msg=err)
else:
# add(sparse, dense) is not supported. Use add(dense, sparse) instead.
# "mul_cpu" / "div_cpu" not implemented for 'Half'
self.assertRaises(RuntimeError, lambda: op(s1, d2.view(d2.numel())[0].item()))
def _run_all_tests_for_sparse_op(self, op_name, device, dtypes):
for dtype1, dtype2 in itertools.product(dtypes, dtypes):
for inplace, coalesced in itertools.product([True, False], [True, False]):
self._test_sparse_op(op_name, inplace, dtype1, dtype2, device, coalesced)
@onlyNativeDeviceTypes
def test_sparse_add(self, device):
self._run_all_tests_for_sparse_op('add', device,
dtypes=get_all_math_dtypes(device))
@onlyNativeDeviceTypes
def test_sparse_mul(self, device):
self._run_all_tests_for_sparse_op('mul', device,
dtypes=get_all_math_dtypes(device))
@onlyNativeDeviceTypes
def test_sparse_div(self, device):
self._run_all_tests_for_sparse_op('div', device,
dtypes=(torch.float32, torch.float64,
torch.complex64, torch.complex128))
@onlyNativeDeviceTypes
def test_sparse_sub(self, device):
self._run_all_tests_for_sparse_op('sub', device,
dtypes=get_all_math_dtypes(device))
@onlyNativeDeviceTypes
@dtypes(torch.bool, torch.short, torch.uint8, torch.int, torch.long)
@float_double_default_dtype
def test_sparse_div_promotion(self, device, dtype):
for op in (torch.div, torch.true_divide):
dividend = torch.randn(5, device=device).to(dtype)
divisor = 2
dividend_sparse = dividend.to_sparse()
casting_result = dividend.to(torch.get_default_dtype()) / 2
self.assertEqual(casting_result, op(dividend_sparse, 2).to_dense())
@onlyNativeDeviceTypes
@dtypes(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64)
def test_integer_addcdiv_deprecated(self, device, dtype):
t = torch.tensor(1, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, '^Integer division.+is no longer supported.+'):
torch.addcdiv(t, t, t)
with self.assertRaisesRegex(RuntimeError, '^Integer division.+is no longer supported.+'):
torch.addcdiv(t, t, t, out=t)
with self.assertRaisesRegex(RuntimeError, '^Integer division.+is no longer supported+'):
t.addcdiv_(t, t)
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
@float_double_default_dtype
@onlyCPU
# NB: skip uint16,32,64 as PyTorch doesn't implement promotion for them
@dtypes(*list(itertools.product(
set(numpy_to_torch_dtype_dict.values()) - {torch.uint16, torch.uint32, torch.uint64},
set(numpy_to_torch_dtype_dict.values()) - {torch.uint16, torch.uint32, torch.uint64})))
def test_numpy_array_binary_ufunc_promotion(self, device, dtypes):
import operator
np_type = torch_to_numpy_dtype_dict[dtypes[0]]
torch_type = dtypes[1]
t = torch.tensor((1,), device=device, dtype=torch_type)
a = np.array((1,), dtype=np_type)
a_as_t = torch.from_numpy(a).to(device=device)
for np_first in (True, False):
for op in (operator.add, torch.add):
# Acquires results of binary ufunc type promotion.
try:
actual = op(a, t) if np_first else op(t, a)
except Exception as e:
actual = e
try:
expected = op(a_as_t, t) if np_first else op(t, a_as_t)
except Exception as e:
expected = e
same_result = (type(expected) == type(actual)) and expected == actual
# Note: An "undesired failure," as opposed to an "expected failure"
# is both expected (we know the test will fail) and
# undesirable (if PyTorch was working properly the test would
# not fail). This test is affected by two issues (see below)
# that will cause undesired failures. It detects when these
# issues will occur and updates this bool accordingly.
undesired_failure = False
# A NumPy array as the first argument to the plus operator
# or as any argument to torch.add is not working as
# intended.
# See https://github.com/pytorch/pytorch/issues/36363.
if np_first and op is operator.add:
undesired_failure = True
if op is torch.add:
undesired_failure = True
# Expects the same result if undesired_failure is false
# and a different result otherwise.
# Note: These cases prettyprint the failing inputs to make
# debugging test failures easier.
if undesired_failure and same_result:
msg = (
f"Failure: {actual} == {expected}. torch type was {torch_type}. "
f"NumPy type was {np_type}. np_first is {np_first} default type is "
f"{torch.get_default_dtype()}."
)
self.fail(msg)
if not undesired_failure and not same_result:
msg = (
f"Failure: {actual} != {expected}. torch type was {torch_type}. "
f"NumPy type was {np_type}. np_first is {np_first} default type is "
f"{torch.get_default_dtype()}."
)
self.fail(msg)
@onlyNativeDeviceTypes
def test_cat_different_dtypes(self, device):
dtypes = all_types_and_complex_and(torch.half, torch.bool)
for x_dtype, y_dtype in itertools.product(dtypes, dtypes):
x_vals, y_vals = [1, 2, 3], [4, 5, 6]
x = torch.tensor(x_vals, device=device, dtype=x_dtype)
y = torch.tensor(y_vals, device=device, dtype=y_dtype)
if x_dtype is torch.bool:
x_vals = [1, 1, 1]
if y_dtype is torch.bool:
y_vals = [1, 1, 1]
res_dtype = torch.result_type(x, y)
expected_res = torch.tensor(x_vals + y_vals, device=device, dtype=res_dtype)
res = torch.cat([x, y])
self.assertEqual(res, expected_res, exact_dtype=True)
# cat: full and an empty tensor.
y = torch.tensor([], device=device, dtype=y_dtype)
res_dtype = torch.result_type(x, y)
expected_res = torch.tensor(x_vals + [], device=device, dtype=res_dtype)
res = torch.cat([x, y])
self.assertEqual(res, expected_res, exact_dtype=True)
@onlyNativeDeviceTypes
def test_cat_out_different_dtypes(self, device):
dtypes = all_types_and_complex_and(torch.half)
for x_dtype, y_dtype, out_dtype in itertools.product(dtypes, dtypes, dtypes):
out = torch.zeros(6, device=device, dtype=out_dtype)
x = torch.tensor([1, 2, 3], device=device, dtype=x_dtype)
y = torch.tensor([4, 5, 6], device=device, dtype=y_dtype)
expected_out = torch.tensor([1, 2, 3, 4, 5, 6], device=device, dtype=out_dtype)
if (((x_dtype.is_floating_point or y_dtype.is_floating_point)
and not (out_dtype.is_floating_point or out_dtype.is_complex))
or ((x_dtype.is_complex or y_dtype.is_complex) and not out_dtype.is_complex)):
# These combinations do not support conversion to an out dtype of a different class
with self.assertRaises(RuntimeError):
torch.cat([x, y], out=out)
else:
torch.cat([x, y], out=out)
self.assertEqual(out, expected_out, exact_dtype=True)
# Verifies that unary ops require matching out types
@onlyNativeDeviceTypes
@dtypes(*itertools.product((torch.int64,
torch.float32, torch.float64,
torch.complex64, torch.complex128),
(torch.int64,
torch.float32, torch.float64,
torch.complex64, torch.complex128)))
def test_unary_op_out_casting(self, device, dtypes):
t = torch.tensor((1), dtype=dtypes[0], device=device)
out = torch.empty(0, dtype=dtypes[1], device=device)
ops = (torch.neg, torch.floor, torch.ceil)
float_and_int_only_ops = {torch.floor, torch.ceil}
real_only_ops = {torch.floor, torch.ceil}
for op in ops:
if dtypes[0] is not dtypes[1]:
with self.assertRaises(RuntimeError):
op(t, out=out)
elif op in real_only_ops and dtypes[0].is_complex:
with self.assertRaises(RuntimeError):
op(t, out=out)
elif (
op in float_and_int_only_ops
and (not dtypes[0].is_floating_point and not dtypes[0].is_complex)
and (not (dtypes[0] == torch.int64 and dtypes[1] == torch.int64))
and device != "meta"
):
with self.assertRaises(RuntimeError):
op(t, out=out)
else:
self.assertEqual(op(t, out=out), op(t))
self.assertEqual(op(t, out=out), out)
# Verifies that the out= argument doesn't affect the computation, that
# is, out = op(...) and op(..., out=out) produce the same result.
@onlyNativeDeviceTypes
@skipMeta
def test_computation_ignores_out(self, device):
t = torch.tensor(33000, dtype=torch.float16, device=device)
out = torch.empty(0, dtype=torch.float64, device=device)
result = torch.add(t, t, out=out)
self.assertEqual(result, t + t, exact_dtype=False)
self.assertNotEqual(result, t.double() + t, exact_dtype=False)
a = torch.tensor(1.5, dtype=torch.float16, device=device)
b = torch.tensor(.666, dtype=torch.float16, device=device)
result = torch.true_divide(a, b, out=out)
self.assertEqual(result, a / b, exact_dtype=False)
self.assertNotEqual(result, a.double() / b, exact_dtype=False)
a = torch.tensor(5, dtype=torch.uint8, device=device)
b = torch.tensor(8, dtype=torch.uint8, device=device)
result = torch.sub(a, b, out=out)
self.assertEqual(result, a - b, exact_dtype=False)
self.assertNotEqual(result, a.double() - b, exact_dtype=False)
@onlyNativeDeviceTypes
@dtypes(*itertools.product((torch.bool, torch.int, torch.float, torch.double), repeat=3))
def test_clamp_type_promotion(self, device, dtypes):
dtype0, dtype1, dtype2 = dtypes
S = 4
def make_tensor(size, dtype):
if dtype == torch.bool:
return torch.randint(2, size, dtype=dtype, device=device)
elif dtype == torch.int:
return torch.randint(10, size, dtype=dtype, device=device)
else:
return torch.randn(size, dtype=dtype, device=device)
min_t = make_tensor((S,), dtype1)
max_t = make_tensor((S,), dtype2)
mins = (min_t, min_t[0], min_t[0].item())
maxs = (max_t, max_t[0], max_t[0].item())
inp = make_tensor((S,), dtype0)
for min_v, max_v in itertools.product(mins, maxs):
if type(max_v) != type(min_v):
continue
if isinstance(min_v, torch.Tensor) and min_v.ndim == 0 and max_v.ndim == 0:
continue # 0d tensors go to scalar overload, and it's tested separately
def expected_type(inp, max, min):
arg1, arg2 = max, min
if isinstance(max, torch.Tensor) and max.ndim == 0:
# promote against the possibly 0-dim tensor last, since 0-dim tensors promote like scalars
arg1, arg2 = min, max
exp_type = torch.result_type(inp, arg1)
inp_new = torch.empty_like(inp, dtype=exp_type)
return torch.result_type(inp_new, arg2)
exp_type = expected_type(inp, min_v, max_v)
if exp_type != torch.bool:
actual = torch.clamp(inp, min_v, max_v)
inps = [x.to(exp_type) if isinstance(x, torch.Tensor) else x for x in (inp, min_v, max_v)]
expected = torch.clamp(inps[0], inps[1], inps[2])
self.assertEqual(actual, expected)
if inp.dtype in floating_types() or exp_type == inp.dtype:
actual = torch.clamp_(inp, min_v, max_v)
self.assertEqual(actual, expected, exact_dtype=False)
for val in mins:
def expected_type(inp, val):
return torch.result_type(inp, val)
exp_type = expected_type(inp, val)
if exp_type != torch.bool:
actual = torch.clamp_min(inp, val)
inps = [x.to(exp_type) if isinstance(x, torch.Tensor) else x for x in (inp, val)]
expected = torch.clamp_min(inps[0], inps[1])
self.assertEqual(actual.dtype, exp_type)
self.assertEqual(actual, expected)
if inp.dtype == exp_type:
actual = torch.clamp_min_(inp, val)
self.assertEqual(actual, expected)
actual = torch.clamp_max(inp, val)
expected = torch.clamp_max(inps[0], inps[1])
self.assertEqual(actual, expected)
if inp.dtype in floating_types() or exp_type == inp.dtype:
actual = torch.clamp_max_(inp, val)
self.assertEqual(actual, expected, exact_dtype=False)
@onlyNativeDeviceTypes
def test_ternary_out_promotion(self, device):
for op in [torch.addcdiv, torch.addcmul]:
for dtype in [torch.float32, torch.cfloat]:
prom_dtype = torch.float64 if dtype is torch.float32 else torch.cdouble if dtype is torch.cfloat else dtype
x = torch.rand(3, device=device, dtype=dtype)
y = torch.empty(3, device=device, dtype=dtype)
y_promo = torch.empty(3, device=device, dtype=prom_dtype)
op(x, x, x, out=y)
op(x, x, x, out=y_promo)
self.assertEqual(y, y_promo.to(dtype=dtype))
instantiate_device_type_tests(TestTypePromotion, globals())
if __name__ == '__main__':
run_tests()
|
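The type-promotion assertions above all reduce to torch.result_type; a minimal standalone sketch (outside the test harness, dtypes chosen purely for illustration) of the rules they exercise:

import torch

# Mixed-dtype binary ops compute in the common dtype given by torch.result_type.
x = torch.ones(3, dtype=torch.int64)
y = torch.ones(3, dtype=torch.float32)
assert (x + y).dtype == torch.result_type(x, y) == torch.float32

# true_divide of integer inputs promotes to the default floating-point dtype,
# mirroring test_sparse_div_promotion above.
a = torch.tensor([4, 6], dtype=torch.int32)
assert torch.true_divide(a, 2).dtype == torch.get_default_dtype()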
from functools import wraps
import itertools
import unittest
import torch
from torch.testing._internal.common_utils import (TestCase, run_tests, load_tests, make_tensor,
TEST_NUMPY, set_default_dtype, torch_to_numpy_dtype_dict,
numpy_to_torch_dtype_dict, skipIfTorchDynamo)
from torch.testing._internal.common_device_type import (instantiate_device_type_tests, onlyNativeDeviceTypes,
dtypes, onlyCPU, expectedFailureMeta, skipMeta)
from torch.testing._internal.common_dtype import (
all_types_and_complex_and, get_all_math_dtypes, floating_types, get_all_dtypes
)
from torch.testing._creation import (
float_to_corresponding_complex_type_map
)
import numpy as np
load_tests = load_tests
import operator
|
from functools import wraps
import itertools
import unittest
import torch
from torch.testing._internal.common_utils import (TestCase, run_tests, load_tests, make_tensor,
TEST_NUMPY, set_default_dtype, torch_to_numpy_dtype_dict,
numpy_to_torch_dtype_dict, skipIfTorchDynamo,
xfailIfTorchDynamo)
from torch.testing._internal.common_device_type import (instantiate_device_type_tests, onlyNativeDeviceTypes,
dtypes, onlyCPU, expectedFailureMeta, skipMeta)
from torch.testing._internal.common_dtype import (
all_types_and_complex_and, get_all_math_dtypes, floating_types, get_all_dtypes,
float_to_corresponding_complex_type_map,
)
import numpy as np
import operator
load_tests = load_tests
import operator
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_type_promotion.py
|
_test_sparse_op
|
def _test_sparse_op(self, op_name, inplace, dtype1, dtype2, device, coalesced):
if dtype1.is_complex or dtype2.is_complex:
return
suffix = '_' if inplace else ''
err = "{} {}({}, {})".format(" coalesced" if coalesced else "uncoalesced", op_name + suffix, dtype1, dtype2)
def op(t1, t2, suf=None):
suf = suffix if suf is None else suf
return getattr(t1, op_name + suf)(t2)
add_sub = op_name == 'add' or op_name == 'sub'
(dense1, sparse1) = self._test_sparse_op_input_tensors(device, dtype1, coalesced)
(dense2, sparse2) = self._test_sparse_op_input_tensors(device, dtype2, coalesced, op_name != 'div')
common_dtype = torch.result_type(dense1, dense2)
if self.device_type == 'cpu' and common_dtype == torch.half:
self.assertRaises(RuntimeError, lambda: op(sparse1, dense2))
# Skip inplace tests that would fail due to inability to cast to the output type.
# Some of these would also raise errors due to not being a supported op.
if inplace and not torch.can_cast(common_dtype, dtype1):
self.assertRaises(RuntimeError, lambda: op(dense1, sparse2))
self.assertRaises(RuntimeError, lambda: op(sparse1, sparse2))
self.assertRaises(RuntimeError, lambda: op(sparse1, dense2))
return
expected = op(dense1.clone(), dense2)
precision = self._get_precision(expected.dtype, coalesced)
rtol = None if precision is None else 0
test_tensors = [expected, dense1, sparse1, dense2, sparse2]
e, d1, s1, d2, s2 = [x.clone() for x in test_tensors] if inplace else test_tensors
# Test op(sparse, sparse)
if op_name != 'div':
sparse = op(s1, s2)
self.assertEqual(sparse.dtype, e.dtype)
self.assertEqual(e, sparse.to_dense(), atol=precision, rtol=rtol, msg=err)
else:
# sparse division only supports division by a scalar
self.assertRaises(RuntimeError, lambda: op(s1, s2).to_dense())
# Test op(dense, sparse)
if add_sub or op_name == 'mul':
if inplace:
e, d1, s1, d2, s2 = [x.clone() for x in test_tensors]
dense_sparse = op(d1, s2)
dense_sparse = dense_sparse.to_dense() if dense_sparse.is_sparse else dense_sparse
self.assertEqual(e, dense_sparse, atol=precision, rtol=rtol, msg=err)
else:
# sparse division only supports division by a scalar
# mul: Didn't find kernel to dispatch to for operator 'aten::_nnz'
self.assertRaises(RuntimeError, lambda: op(d1, s2))
# Test op(sparse, dense): supported only for 'mul'.
# add(sparse, dense) is not supported. Use add(dense, sparse) instead.
# sparse division only supports division by a scalar
if op_name != 'mul':
self.assertRaises(RuntimeError, lambda: op(s1, d2))
else:
# No type promotions for inplace operations, hence suf=''
op(s1, d2, suf='')
# Test op(sparse, scalar)
if not add_sub and not (self.device_type == 'cpu' and dtype1 == torch.half):
if inplace:
e, d1, s1, d2, s2 = [x.clone() for x in test_tensors]
scalar = d2.view(d2.numel())[0].item()
sparse = op(s1, scalar)
dense_scalar = op(d1, scalar)
self.assertEqual(sparse.dtype, dense_scalar.dtype)
self.assertEqual(dense_scalar, sparse.to_dense(), atol=precision, rtol=rtol, msg=err)
else:
# add(sparse, dense) is not supported. Use add(dense, sparse) instead.
# "mul_cpu" / "div_cpu" not implemented for 'Half'
self.assertRaises(RuntimeError, lambda: op(s1, d2.view(d2.numel())[0].item()))
|
def _test_sparse_op(self, op_name, inplace, dtype1, dtype2, device, coalesced):
if dtype1.is_complex or dtype2.is_complex:
return
suffix = '_' if inplace else ''
err = f"{' coalesced' if coalesced else 'uncoalesced'} {op_name + suffix}({dtype1}, {dtype2})"
def op(t1, t2, suf=None):
suf = suffix if suf is None else suf
return getattr(t1, op_name + suf)(t2)
add_sub = op_name == 'add' or op_name == 'sub'
(dense1, sparse1) = self._test_sparse_op_input_tensors(device, dtype1, coalesced)
(dense2, sparse2) = self._test_sparse_op_input_tensors(device, dtype2, coalesced, op_name != 'div')
common_dtype = torch.result_type(dense1, dense2)
if self.device_type == 'cpu' and common_dtype == torch.half:
self.assertRaises(RuntimeError, lambda: op(sparse1, dense2))
# Skip inplace tests that would fail due to inability to cast to the output type.
# Some of these would also raise errors due to not being a supported op.
if inplace and not torch.can_cast(common_dtype, dtype1):
self.assertRaises(RuntimeError, lambda: op(dense1, sparse2))
self.assertRaises(RuntimeError, lambda: op(sparse1, sparse2))
self.assertRaises(RuntimeError, lambda: op(sparse1, dense2))
return
expected = op(dense1.clone(), dense2)
precision = self._get_precision(expected.dtype, coalesced)
rtol = None if precision is None else 0
test_tensors = [expected, dense1, sparse1, dense2, sparse2]
e, d1, s1, d2, s2 = [x.clone() for x in test_tensors] if inplace else test_tensors
# Test op(sparse, sparse)
if op_name != 'div':
sparse = op(s1, s2)
self.assertEqual(sparse.dtype, e.dtype)
self.assertEqual(e, sparse.to_dense(), atol=precision, rtol=rtol, msg=err)
else:
# sparse division only supports division by a scalar
self.assertRaises(RuntimeError, lambda: op(s1, s2).to_dense())
# Test op(dense, sparse)
if add_sub or op_name == 'mul':
if inplace:
e, d1, s1, d2, s2 = (x.clone() for x in test_tensors)
dense_sparse = op(d1, s2)
dense_sparse = dense_sparse.to_dense() if dense_sparse.is_sparse else dense_sparse
self.assertEqual(e, dense_sparse, atol=precision, rtol=rtol, msg=err)
else:
# sparse division only supports division by a scalar
# mul: Didn't find kernel to dispatch to for operator 'aten::_nnz'
self.assertRaises(RuntimeError, lambda: op(d1, s2))
# Test op(sparse, dense): supported only for 'mul'.
# add(sparse, dense) is not supported. Use add(dense, sparse) instead.
# sparse division only supports division by a scalar
if op_name != 'mul':
self.assertRaises(RuntimeError, lambda: op(s1, d2))
else:
# No type promotions for inplace operations, hence suf=''
op(s1, d2, suf='')
# Test op(sparse, scalar)
if not add_sub and not (self.device_type == 'cpu' and dtype1 == torch.half):
if inplace:
e, d1, s1, d2, s2 = (x.clone() for x in test_tensors)
scalar = d2.view(d2.numel())[0].item()
sparse = op(s1, scalar)
dense_scalar = op(d1, scalar)
self.assertEqual(sparse.dtype, dense_scalar.dtype)
self.assertEqual(dense_scalar, sparse.to_dense(), atol=precision, rtol=rtol, msg=err)
else:
# add(sparse, dense) is not supported. Use add(dense, sparse) instead.
# "mul_cpu" / "div_cpu" not implemented for 'Half'
self.assertRaises(RuntimeError, lambda: op(s1, d2.view(d2.numel())[0].item()))
|
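A minimal sketch of the promotion behavior _test_sparse_op asserts, using illustrative COO tensors: add(sparse, sparse) promotes to the same common dtype as the dense op, while sparse division is defined only for scalar divisors.

import torch

idx = torch.tensor([[0, 1]])
s_int = torch.sparse_coo_tensor(idx, torch.tensor([1, 2], dtype=torch.int32), (3,))
s_dbl = torch.sparse_coo_tensor(idx, torch.tensor([1.0, 2.0], dtype=torch.float64), (3,))

# add(sparse, sparse) promotes exactly like the dense op does.
assert torch.add(s_int, s_dbl).dtype == torch.result_type(s_int, s_dbl)

# Division of a sparse tensor is supported only by a scalar.
assert (s_dbl / 2).to_dense().dtype == torch.float64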
from functools import wraps
import itertools
import unittest
import torch
from torch.testing._internal.common_utils import (TestCase, run_tests, load_tests, make_tensor,
TEST_NUMPY, set_default_dtype, torch_to_numpy_dtype_dict,
numpy_to_torch_dtype_dict, skipIfTorchDynamo)
from torch.testing._internal.common_device_type import (instantiate_device_type_tests, onlyNativeDeviceTypes,
dtypes, onlyCPU, expectedFailureMeta, skipMeta)
from torch.testing._internal.common_dtype import (
all_types_and_complex_and, get_all_math_dtypes, floating_types, get_all_dtypes
)
from torch.testing._creation import (
float_to_corresponding_complex_type_map
)
import numpy as np
load_tests = load_tests
class TestTypePromotion(TestCase):
import operator
|
from functools import wraps
import itertools
import unittest
import torch
from torch.testing._internal.common_utils import (TestCase, run_tests, load_tests, make_tensor,
TEST_NUMPY, set_default_dtype, torch_to_numpy_dtype_dict,
numpy_to_torch_dtype_dict, skipIfTorchDynamo,
xfailIfTorchDynamo)
from torch.testing._internal.common_device_type import (instantiate_device_type_tests, onlyNativeDeviceTypes,
dtypes, onlyCPU, expectedFailureMeta, skipMeta)
from torch.testing._internal.common_dtype import (
all_types_and_complex_and, get_all_math_dtypes, floating_types, get_all_dtypes,
float_to_corresponding_complex_type_map,
)
import numpy as np
import operator
load_tests = load_tests
class TestTypePromotion(TestCase):
import operator
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_type_promotion.py
|
op
|
def op(t1, t2, suf=None):
suf = suffix if suf is None else suf
return getattr(t1, op_name + suf)(t2)
add_sub = op_name == 'add' or op_name == 'sub'
(dense1, sparse1) = self._test_sparse_op_input_tensors(device, dtype1, coalesced)
(dense2, sparse2) = self._test_sparse_op_input_tensors(device, dtype2, coalesced, op_name != 'div')
common_dtype = torch.result_type(dense1, dense2)
if self.device_type == 'cpu' and common_dtype == torch.half:
self.assertRaises(RuntimeError, lambda: op(sparse1, dense2))
# Skip inplace tests that would fail due to inability to cast to the output type.
# Some of these would also raise errors due to not being a supported op.
if inplace and not torch.can_cast(common_dtype, dtype1):
self.assertRaises(RuntimeError, lambda: op(dense1, sparse2))
self.assertRaises(RuntimeError, lambda: op(sparse1, sparse2))
self.assertRaises(RuntimeError, lambda: op(sparse1, dense2))
return
expected = op(dense1.clone(), dense2)
precision = self._get_precision(expected.dtype, coalesced)
rtol = None if precision is None else 0
test_tensors = [expected, dense1, sparse1, dense2, sparse2]
e, d1, s1, d2, s2 = [x.clone() for x in test_tensors] if inplace else test_tensors
# Test op(sparse, sparse)
if op_name != 'div':
sparse = op(s1, s2)
self.assertEqual(sparse.dtype, e.dtype)
self.assertEqual(e, sparse.to_dense(), atol=precision, rtol=rtol, msg=err)
else:
# sparse division only supports division by a scalar
self.assertRaises(RuntimeError, lambda: op(s1, s2).to_dense())
# Test op(dense, sparse)
if add_sub or op_name == 'mul':
if inplace:
e, d1, s1, d2, s2 = [x.clone() for x in test_tensors]
dense_sparse = op(d1, s2)
dense_sparse = dense_sparse.to_dense() if dense_sparse.is_sparse else dense_sparse
self.assertEqual(e, dense_sparse, atol=precision, rtol=rtol, msg=err)
else:
# sparse division only supports division by a scalar
# mul: Didn't find kernel to dispatch to for operator 'aten::_nnz'
self.assertRaises(RuntimeError, lambda: op(d1, s2))
# Test op(sparse, dense): supported only for 'mul'.
# add(sparse, dense) is not supported. Use add(dense, sparse) instead.
# sparse division only supports division by a scalar
if op_name != 'mul':
self.assertRaises(RuntimeError, lambda: op(s1, d2))
else:
# No type promotions for inplace operations, hence suf=''
op(s1, d2, suf='')
# Test op(sparse, scalar)
if not add_sub and not (self.device_type == 'cpu' and dtype1 == torch.half):
if inplace:
e, d1, s1, d2, s2 = [x.clone() for x in test_tensors]
scalar = d2.view(d2.numel())[0].item()
sparse = op(s1, scalar)
dense_scalar = op(d1, scalar)
self.assertEqual(sparse.dtype, dense_scalar.dtype)
self.assertEqual(dense_scalar, sparse.to_dense(), atol=precision, rtol=rtol, msg=err)
else:
# add(sparse, dense) is not supported. Use add(dense, sparse) instead.
# "mul_cpu" / "div_cpu" not implemented for 'Half'
self.assertRaises(RuntimeError, lambda: op(s1, d2.view(d2.numel())[0].item()))
|
def op(t1, t2, suf=None):
suf = suffix if suf is None else suf
return getattr(t1, op_name + suf)(t2)
add_sub = op_name == 'add' or op_name == 'sub'
(dense1, sparse1) = self._test_sparse_op_input_tensors(device, dtype1, coalesced)
(dense2, sparse2) = self._test_sparse_op_input_tensors(device, dtype2, coalesced, op_name != 'div')
common_dtype = torch.result_type(dense1, dense2)
if self.device_type == 'cpu' and common_dtype == torch.half:
self.assertRaises(RuntimeError, lambda: op(sparse1, dense2))
# Skip inplace tests that would fail due to inability to cast to the output type.
# Some of these would also raise errors due to not being a supported op.
if inplace and not torch.can_cast(common_dtype, dtype1):
self.assertRaises(RuntimeError, lambda: op(dense1, sparse2))
self.assertRaises(RuntimeError, lambda: op(sparse1, sparse2))
self.assertRaises(RuntimeError, lambda: op(sparse1, dense2))
return
expected = op(dense1.clone(), dense2)
precision = self._get_precision(expected.dtype, coalesced)
rtol = None if precision is None else 0
test_tensors = [expected, dense1, sparse1, dense2, sparse2]
e, d1, s1, d2, s2 = [x.clone() for x in test_tensors] if inplace else test_tensors
# Test op(sparse, sparse)
if op_name != 'div':
sparse = op(s1, s2)
self.assertEqual(sparse.dtype, e.dtype)
self.assertEqual(e, sparse.to_dense(), atol=precision, rtol=rtol, msg=err)
else:
# sparse division only supports division by a scalar
self.assertRaises(RuntimeError, lambda: op(s1, s2).to_dense())
# Test op(dense, sparse)
if add_sub or op_name == 'mul':
if inplace:
e, d1, s1, d2, s2 = (x.clone() for x in test_tensors)
dense_sparse = op(d1, s2)
dense_sparse = dense_sparse.to_dense() if dense_sparse.is_sparse else dense_sparse
self.assertEqual(e, dense_sparse, atol=precision, rtol=rtol, msg=err)
else:
# sparse division only supports division by a scalar
# mul: Didn't find kernel to dispatch to for operator 'aten::_nnz'
self.assertRaises(RuntimeError, lambda: op(d1, s2))
# Test op(sparse, dense): supported only for 'mul'.
# add(sparse, dense) is not supported. Use add(dense, sparse) instead.
# sparse division only supports division by a scalar
if op_name != 'mul':
self.assertRaises(RuntimeError, lambda: op(s1, d2))
else:
# No type promotions for inplace operations, hence suf=''
op(s1, d2, suf='')
# Test op(sparse, scalar)
if not add_sub and not (self.device_type == 'cpu' and dtype1 == torch.half):
if inplace:
e, d1, s1, d2, s2 = (x.clone() for x in test_tensors)
scalar = d2.view(d2.numel())[0].item()
sparse = op(s1, scalar)
dense_scalar = op(d1, scalar)
self.assertEqual(sparse.dtype, dense_scalar.dtype)
self.assertEqual(dense_scalar, sparse.to_dense(), atol=precision, rtol=rtol, msg=err)
else:
# add(sparse, dense) is not supported. Use add(dense, sparse) instead.
# "mul_cpu" / "div_cpu" not implemented for 'Half'
self.assertRaises(RuntimeError, lambda: op(s1, d2.view(d2.numel())[0].item()))
|
from functools import wraps
import itertools
import unittest
import torch
from torch.testing._internal.common_utils import (TestCase, run_tests, load_tests, make_tensor,
TEST_NUMPY, set_default_dtype, torch_to_numpy_dtype_dict,
numpy_to_torch_dtype_dict, skipIfTorchDynamo)
from torch.testing._internal.common_device_type import (instantiate_device_type_tests, onlyNativeDeviceTypes,
dtypes, onlyCPU, expectedFailureMeta, skipMeta)
from torch.testing._internal.common_dtype import (
all_types_and_complex_and, get_all_math_dtypes, floating_types, get_all_dtypes
)
from torch.testing._creation import (
float_to_corresponding_complex_type_map
)
import numpy as np
load_tests = load_tests
import operator
|
from functools import wraps
import itertools
import unittest
import torch
from torch.testing._internal.common_utils import (TestCase, run_tests, load_tests, make_tensor,
TEST_NUMPY, set_default_dtype, torch_to_numpy_dtype_dict,
numpy_to_torch_dtype_dict, skipIfTorchDynamo,
xfailIfTorchDynamo)
from torch.testing._internal.common_device_type import (instantiate_device_type_tests, onlyNativeDeviceTypes,
dtypes, onlyCPU, expectedFailureMeta, skipMeta)
from torch.testing._internal.common_dtype import (
all_types_and_complex_and, get_all_math_dtypes, floating_types, get_all_dtypes,
float_to_corresponding_complex_type_map,
)
import numpy as np
import operator
load_tests = load_tests
import operator
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_type_promotion.py
|
make_tensor
|
def make_tensor(size, dtype):
if dtype == torch.bool:
return torch.randint(2, size, dtype=dtype, device=device)
elif dtype == torch.int:
return torch.randint(10, size, dtype=dtype, device=device)
else:
return torch.randn(size, dtype=dtype, device=device)
min_t = make_tensor((S,), dtype1)
max_t = make_tensor((S,), dtype2)
mins = (min_t, min_t[0], min_t[0].item())
maxs = (max_t, max_t[0], max_t[0].item())
inp = make_tensor((S,), dtype0)
for min_v, max_v in itertools.product(mins, maxs):
if type(max_v) != type(min_v):
continue
if isinstance(min_v, torch.Tensor) and min_v.ndim == 0 and max_v.ndim == 0:
continue # 0d tensors go to scalar overload, and it's tested separately
def expected_type(inp, max, min):
arg1, arg2 = max, min
if isinstance(max, torch.Tensor) and max.ndim == 0:
# promote against the possibly 0-dim tensor last, since 0-dim tensors promote like scalars
arg1, arg2 = min, max
exp_type = torch.result_type(inp, arg1)
inp_new = torch.empty_like(inp, dtype=exp_type)
return torch.result_type(inp_new, arg2)
exp_type = expected_type(inp, min_v, max_v)
if exp_type != torch.bool:
actual = torch.clamp(inp, min_v, max_v)
inps = list(map(lambda x: x.to(exp_type) if isinstance(x, torch.Tensor) else x,
(inp, min_v, max_v)))
expected = torch.clamp(inps[0], inps[1], inps[2])
self.assertEqual(actual, expected)
if inp.dtype in floating_types() or exp_type == inp.dtype:
actual = torch.clamp_(inp, min_v, max_v)
self.assertEqual(actual, expected, exact_dtype=False)
for val in mins:
def expected_type(inp, val):
return torch.result_type(inp, val)
exp_type = expected_type(inp, val)
if exp_type != torch.bool:
actual = torch.clamp_min(inp, val)
inps = list(map(lambda x: x.to(exp_type) if isinstance(x, torch.Tensor) else x,
(inp, val)))
expected = torch.clamp_min(inps[0], inps[1])
self.assertEqual(actual.dtype, exp_type)
self.assertEqual(actual, expected)
if inp.dtype == exp_type:
actual = torch.clamp_min_(inp, val)
self.assertEqual(actual, expected)
actual = torch.clamp_max(inp, val)
expected = torch.clamp_max(inps[0], inps[1])
self.assertEqual(actual, expected)
if inp.dtype in floating_types() or exp_type == inp.dtype:
actual = torch.clamp_max_(inp, val)
self.assertEqual(actual, expected, exact_dtype=False)
|
def make_tensor(size, dtype):
if dtype == torch.bool:
return torch.randint(2, size, dtype=dtype, device=device)
elif dtype == torch.int:
return torch.randint(10, size, dtype=dtype, device=device)
else:
return torch.randn(size, dtype=dtype, device=device)
min_t = make_tensor((S,), dtype1)
max_t = make_tensor((S,), dtype2)
mins = (min_t, min_t[0], min_t[0].item())
maxs = (max_t, max_t[0], max_t[0].item())
inp = make_tensor((S,), dtype0)
for min_v, max_v in itertools.product(mins, maxs):
if type(max_v) != type(min_v):
continue
if isinstance(min_v, torch.Tensor) and min_v.ndim == 0 and max_v.ndim == 0:
continue # 0d tensors go to scalar overload, and it's tested separately
def expected_type(inp, max, min):
arg1, arg2 = max, min
if isinstance(max, torch.Tensor) and max.ndim == 0:
# promote against the possibly 0-dim tensor last, since 0-dim tensors promote like scalars
arg1, arg2 = min, max
exp_type = torch.result_type(inp, arg1)
inp_new = torch.empty_like(inp, dtype=exp_type)
return torch.result_type(inp_new, arg2)
exp_type = expected_type(inp, min_v, max_v)
if exp_type != torch.bool:
actual = torch.clamp(inp, min_v, max_v)
inps = [x.to(exp_type) if isinstance(x, torch.Tensor) else x for x in (inp, min_v, max_v)]
expected = torch.clamp(inps[0], inps[1], inps[2])
self.assertEqual(actual, expected)
if inp.dtype in floating_types() or exp_type == inp.dtype:
actual = torch.clamp_(inp, min_v, max_v)
self.assertEqual(actual, expected, exact_dtype=False)
for val in mins:
def expected_type(inp, val):
return torch.result_type(inp, val)
exp_type = expected_type(inp, val)
if exp_type != torch.bool:
actual = torch.clamp_min(inp, val)
inps = [x.to(exp_type) if isinstance(x, torch.Tensor) else x for x in (inp, val)]
expected = torch.clamp_min(inps[0], inps[1])
self.assertEqual(actual.dtype, exp_type)
self.assertEqual(actual, expected)
if inp.dtype == exp_type:
actual = torch.clamp_min_(inp, val)
self.assertEqual(actual, expected)
actual = torch.clamp_max(inp, val)
expected = torch.clamp_max(inps[0], inps[1])
self.assertEqual(actual, expected)
if inp.dtype in floating_types() or exp_type == inp.dtype:
actual = torch.clamp_max_(inp, val)
self.assertEqual(actual, expected, exact_dtype=False)
|
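The expected_type helper above models clamp's promotion as two chained torch.result_type steps; a small standalone sketch with illustrative dtypes:

import torch

inp = torch.tensor([1, 5, 9], dtype=torch.int32)
min_t = torch.tensor([2.0, 2.0, 2.0], dtype=torch.float32)
max_t = torch.tensor([7.0, 7.0, 7.0], dtype=torch.float64)

# int32 promoted against float32, then against float64 -> float64
step = torch.result_type(inp, min_t)
exp_type = torch.result_type(torch.empty(0, dtype=step), max_t)
assert torch.clamp(inp, min_t, max_t).dtype == exp_type == torch.float64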
from functools import wraps
import itertools
import unittest
import torch
from torch.testing._internal.common_utils import (TestCase, run_tests, load_tests, make_tensor,
TEST_NUMPY, set_default_dtype, torch_to_numpy_dtype_dict,
numpy_to_torch_dtype_dict, skipIfTorchDynamo)
from torch.testing._internal.common_device_type import (instantiate_device_type_tests, onlyNativeDeviceTypes,
dtypes, onlyCPU, expectedFailureMeta, skipMeta)
from torch.testing._internal.common_dtype import (
all_types_and_complex_and, get_all_math_dtypes, floating_types, get_all_dtypes
)
from torch.testing._creation import (
float_to_corresponding_complex_type_map
)
import numpy as np
load_tests = load_tests
import operator
|
from functools import wraps
import itertools
import unittest
import torch
from torch.testing._internal.common_utils import (TestCase, run_tests, load_tests, make_tensor,
TEST_NUMPY, set_default_dtype, torch_to_numpy_dtype_dict,
numpy_to_torch_dtype_dict, skipIfTorchDynamo,
xfailIfTorchDynamo)
from torch.testing._internal.common_device_type import (instantiate_device_type_tests, onlyNativeDeviceTypes,
dtypes, onlyCPU, expectedFailureMeta, skipMeta)
from torch.testing._internal.common_dtype import (
all_types_and_complex_and, get_all_math_dtypes, floating_types, get_all_dtypes,
float_to_corresponding_complex_type_map,
)
import numpy as np
import operator
load_tests = load_tests
import operator
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_type_promotion.py
|
expected_type
|
def expected_type(inp, max, min):
arg1, arg2 = max, min
if isinstance(max, torch.Tensor) and max.ndim == 0:
# promote against the possibly 0-dim tensor last, since 0-dim tensors promote like scalars
arg1, arg2 = min, max
exp_type = torch.result_type(inp, arg1)
inp_new = torch.empty_like(inp, dtype=exp_type)
return torch.result_type(inp_new, arg2)
exp_type = expected_type(inp, min_v, max_v)
if exp_type != torch.bool:
actual = torch.clamp(inp, min_v, max_v)
inps = list(map(lambda x: x.to(exp_type) if isinstance(x, torch.Tensor) else x,
(inp, min_v, max_v)))
expected = torch.clamp(inps[0], inps[1], inps[2])
self.assertEqual(actual, expected)
if inp.dtype in floating_types() or exp_type == inp.dtype:
actual = torch.clamp_(inp, min_v, max_v)
self.assertEqual(actual, expected, exact_dtype=False)
|
def expected_type(inp, max, min):
arg1, arg2 = max, min
if isinstance(max, torch.Tensor) and max.ndim == 0:
# promote against the possibly 0-dim tensor last, since 0-dim tensors promote like scalars
arg1, arg2 = min, max
exp_type = torch.result_type(inp, arg1)
inp_new = torch.empty_like(inp, dtype=exp_type)
return torch.result_type(inp_new, arg2)
exp_type = expected_type(inp, min_v, max_v)
if exp_type != torch.bool:
actual = torch.clamp(inp, min_v, max_v)
inps = [x.to(exp_type) if isinstance(x, torch.Tensor) else x for x in (inp, min_v, max_v)]
expected = torch.clamp(inps[0], inps[1], inps[2])
self.assertEqual(actual, expected)
if inp.dtype in floating_types() or exp_type == inp.dtype:
actual = torch.clamp_(inp, min_v, max_v)
self.assertEqual(actual, expected, exact_dtype=False)
|
from functools import wraps
import itertools
import unittest
import torch
from torch.testing._internal.common_utils import (TestCase, run_tests, load_tests, make_tensor,
TEST_NUMPY, set_default_dtype, torch_to_numpy_dtype_dict,
numpy_to_torch_dtype_dict, skipIfTorchDynamo)
from torch.testing._internal.common_device_type import (instantiate_device_type_tests, onlyNativeDeviceTypes,
dtypes, onlyCPU, expectedFailureMeta, skipMeta)
from torch.testing._internal.common_dtype import (
all_types_and_complex_and, get_all_math_dtypes, floating_types, get_all_dtypes
)
from torch.testing._creation import (
float_to_corresponding_complex_type_map
)
import numpy as np
load_tests = load_tests
import operator
|
from functools import wraps
import itertools
import unittest
import torch
from torch.testing._internal.common_utils import (TestCase, run_tests, load_tests, make_tensor,
TEST_NUMPY, set_default_dtype, torch_to_numpy_dtype_dict,
numpy_to_torch_dtype_dict, skipIfTorchDynamo,
xfailIfTorchDynamo)
from torch.testing._internal.common_device_type import (instantiate_device_type_tests, onlyNativeDeviceTypes,
dtypes, onlyCPU, expectedFailureMeta, skipMeta)
from torch.testing._internal.common_dtype import (
all_types_and_complex_and, get_all_math_dtypes, floating_types, get_all_dtypes,
float_to_corresponding_complex_type_map,
)
import numpy as np
import operator
load_tests = load_tests
import operator
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_type_promotion.py
|
expected_type
|
def expected_type(inp, max, min):
arg1, arg2 = max, min
if isinstance(max, torch.Tensor) and max.ndim == 0:
# promote against the possibly 0-dim tensor last, since 0-dim tensors promote like scalars
arg1, arg2 = min, max
exp_type = torch.result_type(inp, arg1)
inp_new = torch.empty_like(inp, dtype=exp_type)
return torch.result_type(inp_new, arg2)
exp_type = expected_type(inp, min_v, max_v)
if exp_type != torch.bool:
actual = torch.clamp(inp, min_v, max_v)
inps = list(map(lambda x: x.to(exp_type) if isinstance(x, torch.Tensor) else x,
(inp, min_v, max_v)))
expected = torch.clamp(inps[0], inps[1], inps[2])
self.assertEqual(actual, expected)
if inp.dtype in floating_types() or exp_type == inp.dtype:
actual = torch.clamp_(inp, min_v, max_v)
self.assertEqual(actual, expected, exact_dtype=False)
|
def expected_type(inp, max, min):
arg1, arg2 = max, min
if isinstance(max, torch.Tensor) and max.ndim == 0:
# promote against the possibly 0-dim tensor last, since 0-dim tensors promote like scalars
arg1, arg2 = min, max
exp_type = torch.result_type(inp, arg1)
inp_new = torch.empty_like(inp, dtype=exp_type)
return torch.result_type(inp_new, arg2)
exp_type = expected_type(inp, min_v, max_v)
if exp_type != torch.bool:
actual = torch.clamp(inp, min_v, max_v)
inps = [x.to(exp_type) if isinstance(x, torch.Tensor) else x for x in (inp, min_v, max_v)]
expected = torch.clamp(inps[0], inps[1], inps[2])
self.assertEqual(actual, expected)
if inp.dtype in floating_types() or exp_type == inp.dtype:
actual = torch.clamp_(inp, min_v, max_v)
self.assertEqual(actual, expected, exact_dtype=False)
|
from functools import wraps
import itertools
import unittest
import torch
from torch.testing._internal.common_utils import (TestCase, run_tests, load_tests, make_tensor,
TEST_NUMPY, set_default_dtype, torch_to_numpy_dtype_dict,
numpy_to_torch_dtype_dict, skipIfTorchDynamo)
from torch.testing._internal.common_device_type import (instantiate_device_type_tests, onlyNativeDeviceTypes,
dtypes, onlyCPU, expectedFailureMeta, skipMeta)
from torch.testing._internal.common_dtype import (
all_types_and_complex_and, get_all_math_dtypes, floating_types, get_all_dtypes
)
from torch.testing._creation import (
float_to_corresponding_complex_type_map
)
import numpy as np
load_tests = load_tests
import operator
|
from functools import wraps
import itertools
import unittest
import torch
from torch.testing._internal.common_utils import (TestCase, run_tests, load_tests, make_tensor,
TEST_NUMPY, set_default_dtype, torch_to_numpy_dtype_dict,
numpy_to_torch_dtype_dict, skipIfTorchDynamo,
xfailIfTorchDynamo)
from torch.testing._internal.common_device_type import (instantiate_device_type_tests, onlyNativeDeviceTypes,
dtypes, onlyCPU, expectedFailureMeta, skipMeta)
from torch.testing._internal.common_dtype import (
all_types_and_complex_and, get_all_math_dtypes, floating_types, get_all_dtypes,
float_to_corresponding_complex_type_map,
)
import numpy as np
import operator
load_tests = load_tests
import operator
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_type_promotion.py
|
test_ternary_out_promotion
|
instantiate_device_type_tests(TestTypePromotion, globals())
if __name__ == '__main__':
run_tests()
|
def test_ternary_out_promotion(self, device):
for op in [torch.addcdiv, torch.addcmul]:
for dtype in [torch.float32, torch.cfloat]:
prom_dtype = torch.float64 if dtype is torch.float32 else torch.cdouble if dtype is torch.cfloat else dtype
x = torch.rand(3, device=device, dtype=dtype)
y = torch.empty(3, device=device, dtype=dtype)
y_promo = torch.empty(3, device=device, dtype=prom_dtype)
op(x, x, x, out=y)
op(x, x, x, out=y_promo)
self.assertEqual(y, y_promo.to(dtype=dtype))
|
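A standalone sketch of the out= upcasting that test_ternary_out_promotion checks: the computation runs at the input dtype, and the result is merely cast into the wider out tensor.

import torch

x = torch.rand(3, dtype=torch.float32)
y64 = torch.empty(3, dtype=torch.float64)
torch.addcmul(x, x, x, out=y64)  # computes x + x * x at float32, stores as float64
assert y64.dtype == torch.float64
assert torch.allclose(y64.to(torch.float32), torch.addcmul(x, x, x))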
from functools import wraps
import itertools
import unittest
import torch
from torch.testing._internal.common_utils import (TestCase, run_tests, load_tests, make_tensor,
TEST_NUMPY, set_default_dtype, torch_to_numpy_dtype_dict,
numpy_to_torch_dtype_dict, skipIfTorchDynamo,
xfailIfTorchDynamo)
from torch.testing._internal.common_device_type import (instantiate_device_type_tests, onlyNativeDeviceTypes,
dtypes, onlyCPU, expectedFailureMeta, skipMeta)
from torch.testing._internal.common_dtype import (
all_types_and_complex_and, get_all_math_dtypes, floating_types, get_all_dtypes,
float_to_corresponding_complex_type_map,
)
import numpy as np
import operator
load_tests = load_tests
class TestTypePromotion(TestCase):
import operator
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/test_typing.py
|
test_reveal
|
def test_reveal(path):
__tracebackhide__ = True
with open(path) as fin:
lines = _parse_reveals(fin)
output_mypy = OUTPUT_MYPY
assert path in output_mypy
for error_line in output_mypy[path]:
match = re.match(
r"^.+\.py:(?P<lineno>\d+): note: .+$",
error_line,
)
if match is None:
raise ValueError(f"Unexpected reveal line format: {error_line}")
lineno = int(match.group("lineno")) - 1
assert "Revealed type is" in error_line
marker = lines[lineno]
_test_reveal(path, marker, error_line, 1 + lineno)
_REVEAL_MSG = """Reveal mismatch at line {}
Expected reveal: {!r}
Observed reveal: {!r}
"""
|
def test_reveal(self, path):
__tracebackhide__ = True
with open(path) as fin:
lines = _parse_reveals(fin)
output_mypy = self.get_mypy_output()
assert path in output_mypy
for error_line in output_mypy[path]:
match = re.match(
r"^.+\.py:(?P<lineno>\d+):(?P<colno>\d+): note: .+$",
error_line,
)
if match is None:
raise ValueError(f"Unexpected reveal line format: {error_line}")
lineno = int(match.group("lineno")) - 1
assert "Revealed type is" in error_line
marker = lines[lineno]
_test_reveal(path, marker, error_line, 1 + lineno)
|
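The widened pattern accommodates the "file.py:LINE:COL: note: ..." format that newer mypy releases emit (older ones omitted the column); a quick check against a made-up report line:

import re

pattern = re.compile(r"^.+\.py:(?P<lineno>\d+):(?P<colno>\d+): note: .+$")
line = 'test/typing/reveal/tensor.py:12:5: note: Revealed type is "Tensor"'
match = pattern.match(line)
assert match and match.group("lineno") == "12" and match.group("colno") == "5"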
import itertools
import os
import re
import shutil
from collections import defaultdict
from typing import IO, Dict, List, Optional
import pytest
from mypy import api
DATA_DIR = os.path.join(os.path.dirname(__file__), "typing")
REVEAL_DIR = os.path.join(DATA_DIR, "reveal")
PASS_DIR = os.path.join(DATA_DIR, "pass")
FAIL_DIR = os.path.join(DATA_DIR, "fail")
MYPY_INI = os.path.join(DATA_DIR, os.pardir, os.pardir, "mypy.ini")
CACHE_DIR = os.path.join(DATA_DIR, ".mypy_cache")
_FAIL_MSG1 = """Extra error at line {}
Extra error: {!r}
"""
_FAIL_MSG2 = """Error mismatch at line {}
Expected error: {!r}
Observed error: {!r}
"""
|
import itertools
import os
import re
import shutil
import unittest
from collections import defaultdict
from threading import Lock
from typing import Dict, IO, List, Optional
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
TestCase,
)
from mypy import api
DATA_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "typing"))
REVEAL_DIR = os.path.join(DATA_DIR, "reveal")
PASS_DIR = os.path.join(DATA_DIR, "pass")
FAIL_DIR = os.path.join(DATA_DIR, "fail")
MYPY_INI = os.path.join(DATA_DIR, os.pardir, os.pardir, "mypy.ini")
CACHE_DIR = os.path.join(DATA_DIR, ".mypy_cache")
_FAIL_MSG1 = """Extra error at line {}
Extra error: {!r}
"""
_FAIL_MSG2 = """Error mismatch at line {}
Expected error: {!r}
Observed error: {!r}
"""
_REVEAL_MSG = """Reveal mismatch at line {}
Expected reveal: {!r}
Observed reveal: {!r}
"""
@unittest.skipIf(NO_MYPY, reason="Mypy is not installed")
class TestTyping(TestCase):
|
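get_mypy_output() presumably wraps mypy's programmatic entry point; a minimal sketch with illustrative arguments (mypy.api.run returns a (stdout, stderr, exit_status) triple whose stdout holds the per-line reports):

from mypy import api

stdout, stderr, exit_status = api.run(["--config-file", "mypy.ini", "typing/reveal"])  # paths are illustrative
reports = stdout.splitlines()  # one "file.py:line:col: ..." entry per line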
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_typing.py
|
test_reveal
|
def test_reveal(path):
__tracebackhide__ = True
with open(path) as fin:
lines = _parse_reveals(fin)
output_mypy = OUTPUT_MYPY
assert path in output_mypy
for error_line in output_mypy[path]:
match = re.match(
r"^.+\.py:(?P<lineno>\d+): note: .+$",
error_line,
)
if match is None:
raise ValueError(f"Unexpected reveal line format: {error_line}")
lineno = int(match.group("lineno")) - 1
assert "Revealed type is" in error_line
marker = lines[lineno]
_test_reveal(path, marker, error_line, 1 + lineno)
_REVEAL_MSG = """Reveal mismatch at line {}
Expected reveal: {!r}
Observed reveal: {!r}
"""
|
def test_reveal(self, path):
__tracebackhide__ = True
with open(path) as fin:
lines = _parse_reveals(fin)
output_mypy = self.get_mypy_output()
assert path in output_mypy
for error_line in output_mypy[path]:
match = re.match(
r"^.+\.py:(?P<lineno>\d+):(?P<colno>\d+): note: .+$",
error_line,
)
if match is None:
raise ValueError(f"Unexpected reveal line format: {error_line}")
lineno = int(match.group("lineno")) - 1
assert "Revealed type is" in error_line
marker = lines[lineno]
_test_reveal(path, marker, error_line, 1 + lineno)
|
import itertools
import os
import re
import shutil
from collections import defaultdict
from typing import IO, Dict, List, Optional
import pytest
from mypy import api
DATA_DIR = os.path.join(os.path.dirname(__file__), "typing")
REVEAL_DIR = os.path.join(DATA_DIR, "reveal")
PASS_DIR = os.path.join(DATA_DIR, "pass")
FAIL_DIR = os.path.join(DATA_DIR, "fail")
MYPY_INI = os.path.join(DATA_DIR, os.pardir, os.pardir, "mypy.ini")
CACHE_DIR = os.path.join(DATA_DIR, ".mypy_cache")
_FAIL_MSG1 = """Extra error at line {}
Extra error: {!r}
"""
_FAIL_MSG2 = """Error mismatch at line {}
Expected error: {!r}
Observed error: {!r}
"""
|
import itertools
import os
import re
import shutil
import unittest
from collections import defaultdict
from threading import Lock
from typing import Dict, IO, List, Optional
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
TestCase,
)
from mypy import api
DATA_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "typing"))
REVEAL_DIR = os.path.join(DATA_DIR, "reveal")
PASS_DIR = os.path.join(DATA_DIR, "pass")
FAIL_DIR = os.path.join(DATA_DIR, "fail")
MYPY_INI = os.path.join(DATA_DIR, os.pardir, os.pardir, "mypy.ini")
CACHE_DIR = os.path.join(DATA_DIR, ".mypy_cache")
_FAIL_MSG1 = """Extra error at line {}
Extra error: {!r}
"""
_FAIL_MSG2 = """Error mismatch at line {}
Expected error: {!r}
Observed error: {!r}
"""
_REVEAL_MSG = """Reveal mismatch at line {}
Expected reveal: {!r}
Observed reveal: {!r}
"""
@unittest.skipIf(NO_MYPY, reason="Mypy is not installed")
class TestTyping(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_unary_ufuncs.py
|
test_float_domains
|
def test_float_domains(self, device, dtype, op):
eps = (1e-5, 1e-3, 1e-1, 1, 2, 10, 20, 50, 100)
low, high = op.domain
# NOTE: the following two loops are separated for readability
if low is not None:
low_tensor = torch.tensor(low, device=device, dtype=dtype)
for epsilon in eps:
lower_tensor = low_tensor - epsilon
# Skips the test if the difference is not representable,
# which can occur if, for example, the difference is small
# and the dtype is imprecise (like bfloat16 is)
if lower_tensor.item() == low_tensor.item():
continue
result = op(lower_tensor)
self.assertEqual(
result.item(),
float("nan"),
msg=(
"input of {0} outside lower domain boundary"
" {1} produced {2}, not nan!"
).format(lower_tensor.item(), low, result.item()),
)
if high is not None:
high_tensor = torch.tensor(high, device=device, dtype=dtype)
for epsilon in eps:
higher_tensor = high_tensor + epsilon
# See above comment
if higher_tensor.item() == high_tensor.item():
continue
result = op(higher_tensor)
self.assertEqual(
result.item(),
float("nan"),
msg=(
"input of {0} outside upper domain boundary"
" {1} produced {2}, not nan!"
).format(higher_tensor.item(), high, result.item()),
)
# Helper for comparing torch tensors and numpy arrays
# TODO: should this or assertEqual also validate that strides are equal?
|
def test_float_domains(self, device, dtype, op):
eps = (1e-5, 1e-3, 1e-1, 1, 2, 10, 20, 50, 100)
low, high = op.domain
# NOTE: the following two loops are separated for readability
if low is not None:
low_tensor = torch.tensor(low, device=device, dtype=dtype)
for epsilon in eps:
lower_tensor = low_tensor - epsilon
# Skips the test if the difference is not representable,
# which can occur if, for example, the difference is small
# and the dtype is imprecise (like bfloat16 is)
if lower_tensor.item() == low_tensor.item():
continue
result = op(lower_tensor)
self.assertEqual(
result.item(),
float("nan"),
msg=(
f"input of {lower_tensor.item()} outside lower domain boundary"
f" {low} produced {result.item()}, not nan!"
),
)
if high is not None:
high_tensor = torch.tensor(high, device=device, dtype=dtype)
for epsilon in eps:
higher_tensor = high_tensor + epsilon
# See above comment
if higher_tensor.item() == high_tensor.item():
continue
result = op(higher_tensor)
self.assertEqual(
result.item(),
float("nan"),
msg=(
f"input of {higher_tensor.item()} outside upper domain boundary"
f" {high} produced {result.item()}, not nan!"
),
)
# Helper for comparing torch tensors and numpy arrays
# TODO: should this or assertEqual also validate that strides are equal?
|
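A concrete instance of the domain check above, using torch.log (lower domain boundary 0) as an illustrative op: values pushed below the boundary yield nan, while a perturbation too small for the dtype rounds back onto the boundary, which is exactly the case the continue skips.

import torch

boundary = torch.tensor(0.0)
assert torch.log(boundary - 1e-3).isnan().item()

# In a coarse dtype the perturbation can underflow, leaving the value unchanged.
bf = torch.tensor(0.0, dtype=torch.bfloat16)
assert (bf - 1e-45).item() == bf.item()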
import torch
import numpy as np
import math
from numbers import Number
import random
import unittest
from torch import inf, nan
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
torch_to_numpy_dtype_dict,
numpy_to_torch_dtype_dict,
suppress_warnings,
TEST_SCIPY,
slowTest,
skipIfNoSciPy,
IS_WINDOWS,
gradcheck,
TEST_WITH_ASAN,
)
from torch.testing._internal.common_methods_invocations import (
unary_ufuncs,
generate_elementwise_unary_tensors,
generate_elementwise_unary_small_value_tensors,
generate_elementwise_unary_large_value_tensors,
generate_elementwise_unary_extremal_value_tensors,
)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
ops,
dtypes,
onlyCPU,
onlyNativeDeviceTypes,
onlyCUDA,
dtypesIfCUDA,
precisionOverride,
dtypesIfCPU,
)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
floating_types_and,
all_types_and_complex_and,
integral_types_and,
get_all_math_dtypes,
complex_types,
all_types_and,
floating_and_complex_types_and,
)
import scipy
reference_filtered_ops = list(filter(lambda op: op.ref is not None, unary_ufuncs))
class TestUnaryUfuncs(TestCase):
from random import random
|
import torch
import numpy as np
import math
from numbers import Number
import random
import unittest
from torch import inf, nan
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
torch_to_numpy_dtype_dict,
numpy_to_torch_dtype_dict,
suppress_warnings,
TEST_SCIPY,
slowTest,
skipIfNoSciPy,
IS_WINDOWS,
gradcheck,
is_iterable_of_tensors,
xfailIfTorchDynamo,
)
from torch.testing._internal.common_methods_invocations import (
unary_ufuncs,
generate_elementwise_unary_tensors,
generate_elementwise_unary_small_value_tensors,
generate_elementwise_unary_large_value_tensors,
generate_elementwise_unary_extremal_value_tensors,
)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
ops,
dtypes,
onlyCPU,
onlyNativeDeviceTypes,
onlyCUDA,
dtypesIfCUDA,
precisionOverride,
dtypesIfCPU,
)
from torch.utils import _pytree as pytree
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
floating_types_and,
all_types_and_complex_and,
integral_types_and,
get_all_math_dtypes,
complex_types,
floating_and_complex_types_and,
)
import scipy
reference_filtered_ops = list(filter(lambda op: op.ref is not None, unary_ufuncs))
class TestUnaryUfuncs(TestCase):
from random import random
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_utils.py
|
test_checkpoint_trigger
|
def test_checkpoint_trigger(self):
class Net(nn.Module):
def __init__(self):
super().__init__()
self.counter = 0
def forward(self, input_var):
self.counter += 1
# For reentrant, need to have autograd actually
# pack a tensor to trigger recomp
ret = input_var * torch.tensor(2.)
return ret
# checkpointed
for use_reentrant in [True, False]:
with self.subTest(use_reentrant=use_reentrant):
modules = [Net() for _ in range(10)]
for m in modules:
self.assertEqual(m.counter, 0)
input_var = torch.randn(3, 4, requires_grad=True)
out = checkpoint_sequential(modules, 2, input_var, use_reentrant=use_reentrant)
for m in modules:
self.assertEqual(m.counter, 1)
out.sum().backward()
for m in modules[:(len(modules) // 2)]:
self.assertEqual(m.counter, 2)
for m in modules[(len(modules) // 2):]:
self.assertEqual(m.counter, 1)
|
def test_checkpoint_trigger(self):
class Net(nn.Module):
def __init__(self) -> None:
super().__init__()
self.counter = 0
def forward(self, input_var):
self.counter += 1
# For reentrant, need to have autograd actually
# pack a tensor to trigger recomp
ret = input_var * torch.tensor(2.0)
return ret
# checkpointed
for use_reentrant in [True, False]:
with self.subTest(use_reentrant=use_reentrant):
modules = [Net() for _ in range(10)]
for m in modules:
self.assertEqual(m.counter, 0)
input_var = torch.randn(3, 4, requires_grad=True)
out = checkpoint_sequential(
modules, 2, input_var, use_reentrant=use_reentrant
)
for m in modules:
self.assertEqual(m.counter, 1)
out.sum().backward()
for m in modules[: (len(modules) // 2)]:
self.assertEqual(m.counter, 2)
for m in modules[(len(modules) // 2) :]:
self.assertEqual(m.counter, 1)
|
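A minimal usage sketch of the API under test: checkpoint_sequential splits the module sequence into chunks and recomputes intra-chunk activations during backward instead of storing them.

import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint_sequential

model = nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 2))
x = torch.randn(8, 4, requires_grad=True)
out = checkpoint_sequential(model, 2, x, use_reentrant=False)
out.sum().backward()  # each chunk's forward runs a second time here
assert x.grad is not None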
import sys
import os
import re
import shutil
import random
import subprocess
import tempfile
import traceback
import textwrap
import unittest
from typing import Any, List, Dict
import torch
import torch.nn as nn
import torch.utils.data
from torch.utils.data import DataLoader
from torch.testing._internal.common_device_type import (
ops,
onlyCPU,
instantiate_device_type_tests,
)
from torch.testing._internal.common_methods_invocations import op_db
import torch.cuda
from torch.utils._pytree import tree_any, tree_all_only
from torch.utils.checkpoint import checkpoint, checkpoint_sequential
from torch import set_default_device
from torch.utils._device import set_device
from torch.utils._traceback import report_compile_source_on_error
import torch.utils.cpp_extension
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_utils import load_tests, IS_FBCODE, IS_SANDCASTLE, IS_WINDOWS
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import TestCase, run_tests
class TestCheckpoint(TestCase):
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
|
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
import traceback
import unittest
import warnings
from typing import Any, Dict, List
import torch
import torch.cuda
import torch.nn as nn
import torch.utils.cpp_extension
import torch.utils.data
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCPU,
ops,
)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
IS_FBCODE,
IS_SANDCASTLE,
IS_WINDOWS,
load_tests,
)
from torch.utils._device import set_device
from torch.utils._pytree import tree_all_only, tree_any
from torch.utils._traceback import (
CapturedTraceback,
format_traceback_short,
report_compile_source_on_error,
)
from torch.utils.checkpoint import (
_infer_device_type,
checkpoint,
checkpoint_sequential,
get_device_states,
)
from torch.utils.data import DataLoader
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import run_tests, TestCase
class TestCheckpoint(TestCase):
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_utils.py
|
__init__
|
def __init__(self):
super().__init__()
self.counter = 0
|
def __init__(self) -> None:
super().__init__()
self.counter = 0
|
import sys
import os
import re
import shutil
import random
import subprocess
import tempfile
import traceback
import textwrap
import unittest
from typing import Any, List, Dict
import torch
import torch.nn as nn
import torch.utils.data
from torch.utils.data import DataLoader
from torch.testing._internal.common_device_type import (
ops,
onlyCPU,
instantiate_device_type_tests,
)
from torch.testing._internal.common_methods_invocations import op_db
import torch.cuda
from torch.utils._pytree import tree_any, tree_all_only
from torch.utils.checkpoint import checkpoint, checkpoint_sequential
from torch import set_default_device
from torch.utils._device import set_device
from torch.utils._traceback import report_compile_source_on_error
import torch.utils.cpp_extension
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_utils import load_tests, IS_FBCODE, IS_SANDCASTLE, IS_WINDOWS
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import TestCase, run_tests
class Net(nn.Module):
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
|
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
import traceback
import unittest
import warnings
from typing import Any, Dict, List
import torch
import torch.cuda
import torch.nn as nn
import torch.utils.cpp_extension
import torch.utils.data
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCPU,
ops,
)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
IS_FBCODE,
IS_SANDCASTLE,
IS_WINDOWS,
load_tests,
)
from torch.utils._device import set_device
from torch.utils._pytree import tree_all_only, tree_any
from torch.utils._traceback import (
CapturedTraceback,
format_traceback_short,
report_compile_source_on_error,
)
from torch.utils.checkpoint import (
_infer_device_type,
checkpoint,
checkpoint_sequential,
get_device_states,
)
from torch.utils.data import DataLoader
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import run_tests, TestCase
class Net(nn.Module):
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_utils.py
|
test_checkpoint_valid
|
def test_checkpoint_valid(self):
model = nn.Sequential(
nn.Linear(100, 50),
nn.ReLU(),
nn.Linear(50, 20),
nn.ReLU(),
nn.Linear(20, 5),
nn.ReLU()
)
input_var = torch.randn(1, 100, requires_grad=True)
# checkpointed
chunks = 2
modules = list(model.children())
out = checkpoint_sequential(modules, chunks, input_var)
with self.assertRaisesRegex(RuntimeError, "Checkpointing is not compatible"):
torch.autograd.grad(
outputs=[out], grad_outputs=[torch.ones(1, 5)], inputs=[input_var], create_graph=True
)
# works with use_reentrant=False, and grads are the same
out = model(input_var)
grads_no_checkpoint = torch.autograd.grad(
outputs=[out], grad_outputs=[torch.ones(1, 5)], inputs=[input_var], create_graph=True,
)
out_checkpoint = checkpoint_sequential(modules, chunks, input_var, use_reentrant=False)
# check outputs are the same
self.assertEqual(out_checkpoint, out)
grads_checkpoint = torch.autograd.grad(
outputs=[out_checkpoint], grad_outputs=[torch.ones(1, 5)], inputs=[input_var], create_graph=True,
)
self.assertEqual(grads_no_checkpoint, grads_checkpoint)
|
def test_checkpoint_valid(self):
model = nn.Sequential(
nn.Linear(100, 50),
nn.ReLU(),
nn.Linear(50, 20),
nn.ReLU(),
nn.Linear(20, 5),
nn.ReLU(),
)
input_var = torch.randn(1, 100, requires_grad=True)
# checkpointed
chunks = 2
modules = list(model.children())
out = checkpoint_sequential(modules, chunks, input_var, use_reentrant=True)
with self.assertRaisesRegex(
RuntimeError, "torch.utils.checkpoint is incompatible"
):
torch.autograd.grad(
outputs=[out],
grad_outputs=[torch.ones(1, 5)],
inputs=[input_var],
create_graph=True,
)
# works with use_reentrant=False, and grads are the same
out = model(input_var)
grads_no_checkpoint = torch.autograd.grad(
outputs=[out],
grad_outputs=[torch.ones(1, 5)],
inputs=[input_var],
create_graph=True,
)
out_checkpoint = checkpoint_sequential(
modules, chunks, input_var, use_reentrant=False
)
# check outputs are the same
self.assertEqual(out_checkpoint, out)
grads_checkpoint = torch.autograd.grad(
outputs=[out_checkpoint],
grad_outputs=[torch.ones(1, 5)],
inputs=[input_var],
create_graph=True,
)
self.assertEqual(grads_no_checkpoint, grads_checkpoint)
|
import sys
import os
import re
import shutil
import random
import subprocess
import tempfile
import traceback
import textwrap
import unittest
from typing import Any, List, Dict
import torch
import torch.nn as nn
import torch.utils.data
from torch.utils.data import DataLoader
from torch.testing._internal.common_device_type import (
ops,
onlyCPU,
instantiate_device_type_tests,
)
from torch.testing._internal.common_methods_invocations import op_db
import torch.cuda
from torch.utils._pytree import tree_any, tree_all_only
from torch.utils.checkpoint import checkpoint, checkpoint_sequential
from torch import set_default_device
from torch.utils._device import set_device
from torch.utils._traceback import report_compile_source_on_error
import torch.utils.cpp_extension
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_utils import load_tests, IS_FBCODE, IS_SANDCASTLE, IS_WINDOWS
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import TestCase, run_tests
class TestCheckpoint(TestCase):
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
|
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
import traceback
import unittest
import warnings
from typing import Any, Dict, List
import torch
import torch.cuda
import torch.nn as nn
import torch.utils.cpp_extension
import torch.utils.data
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCPU,
ops,
)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
IS_FBCODE,
IS_SANDCASTLE,
IS_WINDOWS,
load_tests,
)
from torch.utils._device import set_device
from torch.utils._pytree import tree_all_only, tree_any
from torch.utils._traceback import (
CapturedTraceback,
format_traceback_short,
report_compile_source_on_error,
)
from torch.utils.checkpoint import (
_infer_device_type,
checkpoint,
checkpoint_sequential,
get_device_states,
)
from torch.utils.data import DataLoader
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import run_tests, TestCase
class TestCheckpoint(TestCase):
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
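The record above pins down the checkpointing mode explicitly at each call site. As background, a minimal sketch of the two modes of torch.utils.checkpoint.checkpoint_sequential (the toy model and sizes are hypothetical; assumes a PyTorch build whose checkpoint API accepts use_reentrant, as in the "after" code):

import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint_sequential

# Hypothetical toy model; any nn.Sequential works the same way.
model = nn.Sequential(nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 4))
x = torch.randn(2, 8, requires_grad=True)

# Non-reentrant checkpointing recomputes activations during backward and,
# unlike the reentrant mode, still supports torch.autograd.grad and
# double backward (create_graph=True).
out = checkpoint_sequential(model, 2, x, use_reentrant=False)
(grad,) = torch.autograd.grad(out.sum(), x, create_graph=True)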
torch
|
test/test_utils.py
|
test_checkpoint_module_list
|
def test_checkpoint_module_list(self):
class ModuleListNet(nn.Module):
def __init__(self):
super().__init__()
module_list = [
nn.Linear(100, 50),
nn.ReLU(),
nn.Linear(50, 20),
nn.ReLU(),
nn.Linear(20, 5),
nn.ReLU(),
]
self.module_list = nn.ModuleList(module_list)
def forward(self, input):
for layer in self.module_list:
input = layer(input)
return input
for use_reentrant in [True, False]:
with self.subTest(use_reentrant=use_reentrant):
model = ModuleListNet()
# Compare uncheckpointed model with its checkpointed counterparts.
self._check_checkpoint_sequential(
model,
[list(model.module_list.children()), model.module_list],
2,
torch.randn(1, 100, requires_grad=True),
use_reentrant=use_reentrant,
)
|
def test_checkpoint_module_list(self):
class ModuleListNet(nn.Module):
def __init__(self) -> None:
super().__init__()
module_list = [
nn.Linear(100, 50),
nn.ReLU(),
nn.Linear(50, 20),
nn.ReLU(),
nn.Linear(20, 5),
nn.ReLU(),
]
self.module_list = nn.ModuleList(module_list)
def forward(self, input):
for layer in self.module_list:
input = layer(input)
return input
for use_reentrant in [True, False]:
with self.subTest(use_reentrant=use_reentrant):
model = ModuleListNet()
# Compare uncheckpointed model with its checkpointed counterparts.
self._check_checkpoint_sequential(
model,
[list(model.module_list.children()), model.module_list],
2,
torch.randn(1, 100, requires_grad=True),
use_reentrant=use_reentrant,
)
|
import sys
import os
import re
import shutil
import random
import subprocess
import tempfile
import traceback
import textwrap
import unittest
from typing import Any, List, Dict
import torch
import torch.nn as nn
import torch.utils.data
from torch.utils.data import DataLoader
from torch.testing._internal.common_device_type import (
ops,
onlyCPU,
instantiate_device_type_tests,
)
from torch.testing._internal.common_methods_invocations import op_db
import torch.cuda
from torch.utils._pytree import tree_any, tree_all_only
from torch.utils.checkpoint import checkpoint, checkpoint_sequential
from torch import set_default_device
from torch.utils._device import set_device
from torch.utils._traceback import report_compile_source_on_error
import torch.utils.cpp_extension
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_utils import load_tests, IS_FBCODE, IS_SANDCASTLE, IS_WINDOWS
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import TestCase, run_tests
class TestCheckpoint(TestCase):
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
|
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
import traceback
import unittest
import warnings
from typing import Any, Dict, List
import torch
import torch.cuda
import torch.nn as nn
import torch.utils.cpp_extension
import torch.utils.data
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCPU,
ops,
)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
IS_FBCODE,
IS_SANDCASTLE,
IS_WINDOWS,
load_tests,
)
from torch.utils._device import set_device
from torch.utils._pytree import tree_all_only, tree_any
from torch.utils._traceback import (
CapturedTraceback,
format_traceback_short,
report_compile_source_on_error,
)
from torch.utils.checkpoint import (
_infer_device_type,
checkpoint,
checkpoint_sequential,
get_device_states,
)
from torch.utils.data import DataLoader
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import run_tests, TestCase
class TestCheckpoint(TestCase):
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
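A note on the record above: checkpoint_sequential accepts either an nn.Sequential, a plain list of modules or functions, or (as the test exercises) an nn.ModuleList. A minimal sketch under the same assumptions as before:

import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint_sequential

layers = nn.ModuleList([nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 4)])
x = torch.randn(2, 8, requires_grad=True)

# Both the ModuleList itself and its children list are valid inputs.
out = checkpoint_sequential(list(layers.children()), 2, x, use_reentrant=False)
out.sum().backward()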
torch
|
test/test_utils.py
|
__init__
|
def __init__(self):
super().__init__()
self.counter = 0
|
def __init__(self) -> None:
super().__init__()
self.counter = 0
|
import sys
import os
import re
import shutil
import random
import subprocess
import tempfile
import traceback
import textwrap
import unittest
from typing import Any, List, Dict
import torch
import torch.nn as nn
import torch.utils.data
from torch.utils.data import DataLoader
from torch.testing._internal.common_device_type import (
ops,
onlyCPU,
instantiate_device_type_tests,
)
from torch.testing._internal.common_methods_invocations import op_db
import torch.cuda
from torch.utils._pytree import tree_any, tree_all_only
from torch.utils.checkpoint import checkpoint, checkpoint_sequential
from torch import set_default_device
from torch.utils._device import set_device
from torch.utils._traceback import report_compile_source_on_error
import torch.utils.cpp_extension
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_utils import load_tests, IS_FBCODE, IS_SANDCASTLE, IS_WINDOWS
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import TestCase, run_tests
class Net(nn.Module):
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
|
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
import traceback
import unittest
import warnings
from typing import Any, Dict, List
import torch
import torch.cuda
import torch.nn as nn
import torch.utils.cpp_extension
import torch.utils.data
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCPU,
ops,
)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
IS_FBCODE,
IS_SANDCASTLE,
IS_WINDOWS,
load_tests,
)
from torch.utils._device import set_device
from torch.utils._pytree import tree_all_only, tree_any
from torch.utils._traceback import (
CapturedTraceback,
format_traceback_short,
report_compile_source_on_error,
)
from torch.utils.checkpoint import (
_infer_device_type,
checkpoint,
checkpoint_sequential,
get_device_states,
)
from torch.utils.data import DataLoader
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import run_tests, TestCase
class Net(nn.Module):
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_utils.py
|
run_fn
|
def run_fn(input):
return phase2(input)
state = torch.get_rng_state()
out = phase1(inp)
out = checkpoint(run_fn, out)
out.sum().backward()
grad_with_checkpointing = inp.grad
torch.set_rng_state(state)
inp.grad = None
out = phase1(inp)
out = run_fn(out)
out.sum().backward()
grad_no_checkpointing = inp.grad
self.assertEqual(grad_with_checkpointing, grad_no_checkpointing)
|
def run_fn(input):
return phase2(input)
state = torch.get_rng_state()
out = phase1(inp)
out = checkpoint(run_fn, out, use_reentrant=True)
out.sum().backward()
grad_with_checkpointing = inp.grad
torch.set_rng_state(state)
inp.grad = None
out = phase1(inp)
out = run_fn(out)
out.sum().backward()
grad_no_checkpointing = inp.grad
self.assertEqual(grad_with_checkpointing, grad_no_checkpointing)
|
import sys
import os
import re
import shutil
import random
import subprocess
import tempfile
import traceback
import textwrap
import unittest
from typing import Any, List, Dict
import torch
import torch.nn as nn
import torch.utils.data
from torch.utils.data import DataLoader
from torch.testing._internal.common_device_type import (
ops,
onlyCPU,
instantiate_device_type_tests,
)
from torch.testing._internal.common_methods_invocations import op_db
import torch.cuda
from torch.utils._pytree import tree_any, tree_all_only
from torch.utils.checkpoint import checkpoint, checkpoint_sequential
from torch import set_default_device
from torch.utils._device import set_device
from torch.utils._traceback import report_compile_source_on_error
import torch.utils.cpp_extension
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_utils import load_tests, IS_FBCODE, IS_SANDCASTLE, IS_WINDOWS
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import TestCase, run_tests
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
|
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
import traceback
import unittest
import warnings
from typing import Any, Dict, List
import torch
import torch.cuda
import torch.nn as nn
import torch.utils.cpp_extension
import torch.utils.data
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCPU,
ops,
)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
IS_FBCODE,
IS_SANDCASTLE,
IS_WINDOWS,
load_tests,
)
from torch.utils._device import set_device
from torch.utils._pytree import tree_all_only, tree_any
from torch.utils._traceback import (
CapturedTraceback,
format_traceback_short,
report_compile_source_on_error,
)
from torch.utils.checkpoint import (
_infer_device_type,
checkpoint,
checkpoint_sequential,
get_device_states,
)
from torch.utils.data import DataLoader
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import run_tests, TestCase
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
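The run_fn record above relies on checkpoint's RNG handling: by default (preserve_rng_state=True) the wrapper stashes the RNG state at forward time and replays it during recomputation, so stochastic ops such as dropout produce identical masks. A minimal self-contained sketch of the same save/restore pattern the test uses:

import torch
from torch.utils.checkpoint import checkpoint

def noisy(x):
    # Dropout consumes RNG state; recomputation must replay it to
    # reproduce the same mask in the backward pass.
    return torch.nn.functional.dropout(x, p=0.5, training=True)

x = torch.randn(4, requires_grad=True)
state = torch.get_rng_state()
checkpoint(noisy, x, use_reentrant=True).sum().backward()
grad_ckpt = x.grad.clone()

torch.set_rng_state(state)  # replay the same RNG stream
x.grad = None
noisy(x).sum().backward()
assert torch.equal(x.grad, grad_ckpt)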
torch
|
test/test_utils.py
|
test_checkpoint_non_tensor
|
def test_checkpoint_non_tensor(self):
def run_fn(tensor1, tensor2):
if tensor2 is None:
return tensor1
return tensor1 + tensor2
input_var = torch.randn(1, 100, requires_grad=True)
out = checkpoint(run_fn, input_var, None)
out.sum().backward()
|
def test_checkpoint_non_tensor(self):
def run_fn(tensor1, tensor2):
if tensor2 is None:
return tensor1
return tensor1 + tensor2
input_var = torch.randn(1, 100, requires_grad=True)
out = checkpoint(run_fn, input_var, None, use_reentrant=True)
out.sum().backward()
|
import sys
import os
import re
import shutil
import random
import subprocess
import tempfile
import traceback
import textwrap
import unittest
from typing import Any, List, Dict
import torch
import torch.nn as nn
import torch.utils.data
from torch.utils.data import DataLoader
from torch.testing._internal.common_device_type import (
ops,
onlyCPU,
instantiate_device_type_tests,
)
from torch.testing._internal.common_methods_invocations import op_db
import torch.cuda
from torch.utils._pytree import tree_any, tree_all_only
from torch.utils.checkpoint import checkpoint, checkpoint_sequential
from torch import set_default_device
from torch.utils._device import set_device
from torch.utils._traceback import report_compile_source_on_error
import torch.utils.cpp_extension
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_utils import load_tests, IS_FBCODE, IS_SANDCASTLE, IS_WINDOWS
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import TestCase, run_tests
class TestCheckpoint(TestCase):
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
|
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
import traceback
import unittest
import warnings
from typing import Any, Dict, List
import torch
import torch.cuda
import torch.nn as nn
import torch.utils.cpp_extension
import torch.utils.data
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCPU,
ops,
)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
IS_FBCODE,
IS_SANDCASTLE,
IS_WINDOWS,
load_tests,
)
from torch.utils._device import set_device
from torch.utils._pytree import tree_all_only, tree_any
from torch.utils._traceback import (
CapturedTraceback,
format_traceback_short,
report_compile_source_on_error,
)
from torch.utils.checkpoint import (
_infer_device_type,
checkpoint,
checkpoint_sequential,
get_device_states,
)
from torch.utils.data import DataLoader
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import run_tests, TestCase
class TestCheckpoint(TestCase):
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
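As the record above shows, checkpoint forwards non-tensor positional arguments (here None) to the wrapped function unchanged; only tensor arguments participate in autograd. A minimal sketch with a hypothetical flag argument:

import torch
from torch.utils.checkpoint import checkpoint

def run(t, flag):
    # flag is a plain Python value, passed through as-is.
    return t * 2 if flag else t

x = torch.randn(3, requires_grad=True)
out = checkpoint(run, x, True, use_reentrant=True)
out.sum().backward()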
torch
|
test/test_utils.py
|
foo
|
def foo(t1, t2, scale, t3):
t4 = t1 + t2 * t3
t5 = t1 * t2 + t3
t4 *= scale
t5 *= scale
return scale, t4, None, True, t5, "bar", t1
t1 = torch.rand(10, requires_grad=True)
t2 = torch.rand(10, requires_grad=True)
t3 = torch.rand(10)
scale = random.randint(0, 10)
res = checkpoint(foo, t1, t2, scale, t3)
self.assertEqual(scale, res[0])
self.assertEqual((t1 + t2 * t3) * scale, res[1])
self.assertEqual(None, res[2])
self.assertEqual(True, res[3])
self.assertEqual((t1 * t2 + t3) * scale, res[4])
self.assertEqual("bar", res[5])
self.assertEqual(t1, res[6])
# Validate running backward.
res[1].sum().backward(retain_graph=True)
res[4].sum().backward(retain_graph=True)
res[6].sum().backward()
with self.assertRaisesRegex(RuntimeError, "Trying to backward through the graph a second time"):
res[6].sum().backward()
t1_grad = t1.grad
t2_grad = t2.grad
# Reset grads, run without checkpoint and validate we receive same grads.
t1.grad = None
t2.grad = None
res = foo(t1, t2, scale, t3)
torch.autograd.backward([res[1].sum(), res[4].sum(), res[6].sum()])
self.assertEqual(t1.grad, t1_grad)
self.assertEqual(t2.grad, t2_grad)
|
def foo(t1, t2, scale, t3):
t4 = t1 + t2 * t3
t5 = t1 * t2 + t3
t4 *= scale
t5 *= scale
return scale, t4, None, True, t5, "bar", t1
t1 = torch.rand(10, requires_grad=True)
t2 = torch.rand(10, requires_grad=True)
t3 = torch.rand(10)
scale = random.randint(0, 10)
res = checkpoint(foo, t1, t2, scale, t3, use_reentrant=True)
self.assertEqual(scale, res[0])
self.assertEqual((t1 + t2 * t3) * scale, res[1])
self.assertEqual(None, res[2])
self.assertEqual(True, res[3])
self.assertEqual((t1 * t2 + t3) * scale, res[4])
self.assertEqual("bar", res[5])
self.assertEqual(t1, res[6])
# Validate running backward.
res[1].sum().backward(retain_graph=True)
res[4].sum().backward(retain_graph=True)
res[6].sum().backward()
with self.assertRaisesRegex(
RuntimeError, "Trying to backward through the graph a second time"
):
res[6].sum().backward()
t1_grad = t1.grad
t2_grad = t2.grad
# Reset grads, run without checkpoint and validate we receive same grads.
t1.grad = None
t2.grad = None
res = foo(t1, t2, scale, t3)
torch.autograd.backward([res[1].sum(), res[4].sum(), res[6].sum()])
self.assertEqual(t1.grad, t1_grad)
self.assertEqual(t2.grad, t2_grad)
|
import sys
import os
import re
import shutil
import random
import subprocess
import tempfile
import traceback
import textwrap
import unittest
from typing import Any, List, Dict
import torch
import torch.nn as nn
import torch.utils.data
from torch.utils.data import DataLoader
from torch.testing._internal.common_device_type import (
ops,
onlyCPU,
instantiate_device_type_tests,
)
from torch.testing._internal.common_methods_invocations import op_db
import torch.cuda
from torch.utils._pytree import tree_any, tree_all_only
from torch.utils.checkpoint import checkpoint, checkpoint_sequential
from torch import set_default_device
from torch.utils._device import set_device
from torch.utils._traceback import report_compile_source_on_error
import torch.utils.cpp_extension
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_utils import load_tests, IS_FBCODE, IS_SANDCASTLE, IS_WINDOWS
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import TestCase, run_tests
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
|
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
import traceback
import unittest
import warnings
from typing import Any, Dict, List
import torch
import torch.cuda
import torch.nn as nn
import torch.utils.cpp_extension
import torch.utils.data
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCPU,
ops,
)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
IS_FBCODE,
IS_SANDCASTLE,
IS_WINDOWS,
load_tests,
)
from torch.utils._device import set_device
from torch.utils._pytree import tree_all_only, tree_any
from torch.utils._traceback import (
CapturedTraceback,
format_traceback_short,
report_compile_source_on_error,
)
from torch.utils.checkpoint import (
_infer_device_type,
checkpoint,
checkpoint_sequential,
get_device_states,
)
from torch.utils.data import DataLoader
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import run_tests, TestCase
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
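The foo record above checks that checkpoint can return a mixed tuple: tensors take part in autograd, while ints, None, bools, and strings come back as plain Python values. A minimal sketch:

import torch
from torch.utils.checkpoint import checkpoint

def fn(a, b):
    # Only the tensor outputs carry gradients.
    return 3, a + b, None, True, a * b, "bar"

a = torch.rand(5, requires_grad=True)
b = torch.rand(5, requires_grad=True)
res = checkpoint(fn, a, b, use_reentrant=True)
(res[1].sum() + res[4].sum()).backward()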
torch
|
test/test_utils.py
|
test_bottleneck_cuda
|
def test_bottleneck_cuda(self):
rc, out, err = self._run_bottleneck('bottleneck_test/test_cuda.py')
self.assertEqual(rc, 0, msg='Run failed with\n{}'.format(err))
self._check_run_args()
self._check_environment_summary(out)
self._check_autograd_summary(out)
self._check_cprof_summary(out)
self._check_cuda(out)
|
def test_bottleneck_cuda(self):
rc, out, err = self._run_bottleneck("bottleneck_test/test_cuda.py")
self.assertEqual(rc, 0, msg=f"Run failed with\n{err}")
self._check_run_args()
self._check_environment_summary(out)
self._check_autograd_summary(out)
self._check_cprof_summary(out)
self._check_cuda(out)
|
import sys
import os
import re
import shutil
import random
import subprocess
import tempfile
import traceback
import textwrap
import unittest
from typing import Any, List, Dict
import torch
import torch.nn as nn
import torch.utils.data
from torch.utils.data import DataLoader
from torch.testing._internal.common_device_type import (
ops,
onlyCPU,
instantiate_device_type_tests,
)
from torch.testing._internal.common_methods_invocations import op_db
import torch.cuda
from torch.utils._pytree import tree_any, tree_all_only
from torch.utils.checkpoint import checkpoint, checkpoint_sequential
from torch import set_default_device
from torch.utils._device import set_device
from torch.utils._traceback import report_compile_source_on_error
import torch.utils.cpp_extension
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_utils import load_tests, IS_FBCODE, IS_SANDCASTLE, IS_WINDOWS
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import TestCase, run_tests
test_dir = os.path.abspath(os.path.dirname(str(__file__)))
@unittest.skipIf('SKIP_TEST_BOTTLENECK' in os.environ.keys(), 'SKIP_TEST_BOTTLENECK is set')
class TestBottleneck(TestCase):
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
|
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
import traceback
import unittest
import warnings
from typing import Any, Dict, List
import torch
import torch.cuda
import torch.nn as nn
import torch.utils.cpp_extension
import torch.utils.data
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCPU,
ops,
)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
IS_FBCODE,
IS_SANDCASTLE,
IS_WINDOWS,
load_tests,
)
from torch.utils._device import set_device
from torch.utils._pytree import tree_all_only, tree_any
from torch.utils._traceback import (
CapturedTraceback,
format_traceback_short,
report_compile_source_on_error,
)
from torch.utils.checkpoint import (
_infer_device_type,
checkpoint,
checkpoint_sequential,
get_device_states,
)
from torch.utils.data import DataLoader
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import run_tests, TestCase
test_dir = os.path.abspath(os.path.dirname(str(__file__)))
@unittest.skipIf(
"SKIP_TEST_BOTTLENECK" in os.environ.keys(), "SKIP_TEST_BOTTLENECK is set"
)
class TestBottleneck(TestCase):
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
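The bottleneck tests drive the profiler through a private helper; the underlying tool is the torch.utils.bottleneck command-line module. A minimal sketch of invoking it directly (train.py is a hypothetical script path):

import subprocess
import sys

# Run the bottleneck profiler over a script and capture its combined
# environment / autograd / cProfile summaries from stdout.
proc = subprocess.run(
    [sys.executable, "-m", "torch.utils.bottleneck", "train.py"],
    capture_output=True,
    text=True,
)
print(proc.stdout)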
torch
|
test/test_utils.py
|
test_load_standalone
|
def test_load_standalone(self):
build_dir = tempfile.mkdtemp()
try:
src_path = os.path.join(build_dir, "main.cpp")
src = textwrap.dedent("""\
#include <iostream>
#include <torch/torch.h>
int main() {
auto x = torch::eye(3);
std::cout << x << std::endl;
}
""")
with open(src_path, "wt") as f:
f.write(src)
exec_path = torch.utils.cpp_extension.load(
"standalone_load_test",
src_path,
build_directory=build_dir,
is_python_module=False,
is_standalone=True,
)
ext = ".exe" if IS_WINDOWS else ""
self.assertEqual(
exec_path,
os.path.join(build_dir, f"standalone_load_test{ext}")
)
for shell in [True, False]:
r = subprocess.run(
[exec_path],
shell=shell,
stdout=subprocess.PIPE,
)
self.assertEqual(r.returncode, 0)
self.assertEqual(
# Windows prints "\r\n" for newlines.
textwrap.dedent(r.stdout.decode("utf-8")).replace("\r\n", "\n"),
textwrap.dedent("""\
1 0 0
0 1 0
0 0 1
[ CPUFloatType{3,3} ]
""")
)
finally:
shutil.rmtree(build_dir)
|
def test_load_standalone(self):
build_dir = tempfile.mkdtemp()
try:
src_path = os.path.join(build_dir, "main.cpp")
src = textwrap.dedent(
"""\
#include <iostream>
#include <torch/torch.h>
int main() {
auto x = torch::eye(3);
std::cout << x << std::endl;
}
"""
)
with open(src_path, "w") as f:
f.write(src)
exec_path = torch.utils.cpp_extension.load(
"standalone_load_test",
src_path,
build_directory=build_dir,
is_python_module=False,
is_standalone=True,
)
ext = ".exe" if IS_WINDOWS else ""
self.assertEqual(
exec_path, os.path.join(build_dir, f"standalone_load_test{ext}")
)
for shell in [True, False]:
r = subprocess.run(
[exec_path],
shell=shell,
stdout=subprocess.PIPE,
)
self.assertEqual(r.returncode, 0)
self.assertEqual(
# Windows prints "\r\n" for newlines.
textwrap.dedent(r.stdout.decode("utf-8")).replace("\r\n", "\n"),
textwrap.dedent(
"""\
1 0 0
0 1 0
0 0 1
[ CPUFloatType{3,3} ]
"""
),
)
finally:
shutil.rmtree(build_dir)
|
import sys
import os
import re
import shutil
import random
import subprocess
import tempfile
import traceback
import textwrap
import unittest
from typing import Any, List, Dict
import torch
import torch.nn as nn
import torch.utils.data
from torch.utils.data import DataLoader
from torch.testing._internal.common_device_type import (
ops,
onlyCPU,
instantiate_device_type_tests,
)
from torch.testing._internal.common_methods_invocations import op_db
import torch.cuda
from torch.utils._pytree import tree_any, tree_all_only
from torch.utils.checkpoint import checkpoint, checkpoint_sequential
from torch import set_default_device
from torch.utils._device import set_device
from torch.utils._traceback import report_compile_source_on_error
import torch.utils.cpp_extension
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_utils import load_tests, IS_FBCODE, IS_SANDCASTLE, IS_WINDOWS
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import TestCase, run_tests
test_dir = os.path.abspath(os.path.dirname(str(__file__)))
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
@unittest.skipIf(IS_SANDCASTLE, "cpp_extension is OSS only")
class TestStandaloneCPPJIT(TestCase):
|
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
import traceback
import unittest
import warnings
from typing import Any, Dict, List
import torch
import torch.cuda
import torch.nn as nn
import torch.utils.cpp_extension
import torch.utils.data
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCPU,
ops,
)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
IS_FBCODE,
IS_SANDCASTLE,
IS_WINDOWS,
load_tests,
)
from torch.utils._device import set_device
from torch.utils._pytree import tree_all_only, tree_any
from torch.utils._traceback import (
CapturedTraceback,
format_traceback_short,
report_compile_source_on_error,
)
from torch.utils.checkpoint import (
_infer_device_type,
checkpoint,
checkpoint_sequential,
get_device_states,
)
from torch.utils.data import DataLoader
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import run_tests, TestCase
test_dir = os.path.abspath(os.path.dirname(str(__file__)))
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
@unittest.skipIf(IS_SANDCASTLE, "cpp_extension is OSS only")
class TestStandaloneCPPJIT(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
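Besides building a standalone executable as above, torch.utils.cpp_extension can JIT-build a Python module from an inline source string via load_inline. A minimal sketch (assumes a working host C++ toolchain; the operator name is hypothetical):

import torch
from torch.utils.cpp_extension import load_inline

module = load_inline(
    name="inline_eye_test",
    cpp_sources="torch::Tensor make_eye(int64_t n) { return torch::eye(n); }",
    functions="make_eye",  # auto-generates the pybind11 binding
)
print(module.make_eye(3))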
torch
|
test/test_utils.py
|
tearDown
|
def tearDown(self):
# Clean up
backend_name = torch._C._get_privateuse1_backend_name()
if hasattr(torch, backend_name):
delattr(torch, backend_name)
if f"torch.{backend_name}" in sys.modules:
del sys.modules[f"torch.{backend_name}"]
|
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
import traceback
import unittest
import warnings
from typing import Any, Dict, List
import torch
import torch.cuda
import torch.nn as nn
import torch.utils.cpp_extension
import torch.utils.data
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCPU,
ops,
)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
IS_FBCODE,
IS_SANDCASTLE,
IS_WINDOWS,
load_tests,
)
from torch.utils._device import set_device
from torch.utils._pytree import tree_all_only, tree_any
from torch.utils._traceback import (
CapturedTraceback,
format_traceback_short,
report_compile_source_on_error,
)
from torch.utils.checkpoint import (
_infer_device_type,
checkpoint,
checkpoint_sequential,
get_device_states,
)
from torch.utils.data import DataLoader
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import run_tests, TestCase
test_dir = os.path.abspath(os.path.dirname(str(__file__)))
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
class TestExtensionUtils(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
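The tearDown above cleans up after tests of the private-use backend hooks. The public entry point is torch.utils.rename_privateuse1_backend, which may be called at most once per process; a minimal sketch (the backend name "foo" is hypothetical):

import torch

# Rename the reserved PrivateUse1 dispatch key; afterwards the private
# query used in tearDown reports the new name.
torch.utils.rename_privateuse1_backend("foo")
print(torch._C._get_privateuse1_backend_name())  # -> "foo"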
torch
|
test/test_utils.py
|
run_fn2
|
def run_fn2(tensor1, tensor2):
return tensor1
input_var = torch.randn(1, 4, requires_grad=False)
input_var2 = torch.randn(1, 4, requires_grad=True)
with self.assertRaisesRegex(
RuntimeError,
r"none of output has requires_grad=True, this checkpoint\(\) is not necessary"
):
out = checkpoint(run_fn2, input_var, input_var2)
out.sum().backward()
|
def run_fn2(tensor1, tensor2):
return tensor1
input_var = torch.randn(1, 4, requires_grad=False)
input_var2 = torch.randn(1, 4, requires_grad=True)
with self.assertRaisesRegex(
RuntimeError,
r"none of output has requires_grad=True, this checkpoint\(\) is not necessary",
):
out = checkpoint(run_fn2, input_var, input_var2, use_reentrant=True)
out.sum().backward()
|
import sys
import os
import re
import shutil
import random
import subprocess
import tempfile
import traceback
import textwrap
import unittest
from typing import Any, List, Dict
import torch
import torch.nn as nn
import torch.utils.data
from torch.utils.data import DataLoader
from torch.testing._internal.common_device_type import (
ops,
onlyCPU,
instantiate_device_type_tests,
)
from torch.testing._internal.common_methods_invocations import op_db
import torch.cuda
from torch.utils._pytree import tree_any, tree_all_only
from torch.utils.checkpoint import checkpoint, checkpoint_sequential
from torch import set_default_device
from torch.utils._device import set_device
from torch.utils._traceback import report_compile_source_on_error
import torch.utils.cpp_extension
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_utils import load_tests, IS_FBCODE, IS_SANDCASTLE, IS_WINDOWS
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import TestCase, run_tests
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
|
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
import traceback
import unittest
import warnings
from typing import Any, Dict, List
import torch
import torch.cuda
import torch.nn as nn
import torch.utils.cpp_extension
import torch.utils.data
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCPU,
ops,
)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
IS_FBCODE,
IS_SANDCASTLE,
IS_WINDOWS,
load_tests,
)
from torch.utils._device import set_device
from torch.utils._pytree import tree_all_only, tree_any
from torch.utils._traceback import (
CapturedTraceback,
format_traceback_short,
report_compile_source_on_error,
)
from torch.utils.checkpoint import (
_infer_device_type,
checkpoint,
checkpoint_sequential,
get_device_states,
)
from torch.utils.data import DataLoader
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import run_tests, TestCase
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
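The record above hinges on one behavior: reentrant checkpointing raises when the recomputed outputs carry no gradient requirement. Below is a minimal, self-contained sketch using only the public torch.utils.checkpoint API; the error is raised during backward, which is why the test wraps both calls.

import torch
from torch.utils.checkpoint import checkpoint

def fn(a, b):
    # Only `a` flows to the output, so the output's grad requirement
    # depends solely on `a`.
    return a

a = torch.randn(1, 4, requires_grad=False)
b = torch.randn(1, 4, requires_grad=True)

try:
    out = checkpoint(fn, a, b, use_reentrant=True)
    out.sum().backward()
except RuntimeError as e:
    # "none of output has requires_grad=True, this checkpoint() is not necessary"
    print(e)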
torch
|
test/test_utils.py
|
_do_test
|
def _do_test(fn, should_free):
stats: List[int] = []
def track(x, idx):
                # Track that at each step of the backward, some Tensors were
                # de-allocated (which corresponds to the checkpoint storage
                # being emptied at each step)
def hook(_unused):
self.assertEqual(len(stats), idx)
torch.cuda.synchronize()
stats.append(torch.cuda.memory_allocated())
if idx > 0:
if should_free:
self.assertLess(stats[idx], stats[idx - 1])
else:
self.assertEqual(stats[idx], stats[idx - 1])
x.register_hook(hook)
def test_fn(x):
# The main property of this function is that it contains multiple
# operations that save gradients in a chain.
x = x ** 2
track(x, 2)
x = x ** 2
track(x, 1)
x = x ** 2
track(x, 0)
x = x ** 2
return x.sum()
fn(test_fn)
return stats
x = torch.zeros(10, device="cuda", requires_grad=True)
x.grad = torch.zeros_like(x)
# In a regular backward, buffers get eagerly freed
non_retain_stats = _do_test(lambda fn: fn(x).backward(), True)
# In a retain_grad backward, buffers get preserved
retain_stats = _do_test(lambda fn: fn(x).backward(retain_graph=True), False)
# In a regular backward with checkpoint, buffers get eagerly freed
checkpoint_non_retain_stats = _do_test(lambda fn: checkpoint(fn, x, use_reentrant=False).backward(), True)
# In a retain_grad backward with checkpoint, buffers get preserved
checkpoint_retain_stats = _do_test(lambda fn: checkpoint(fn, x, use_reentrant=False).backward(retain_graph=True), False)
self.assertEqual(non_retain_stats, checkpoint_non_retain_stats)
self.assertEqual(retain_stats, checkpoint_retain_stats)
|
def _do_test(fn, should_free):
stats: List[int] = []
def track(x, idx):
                # Track that at each step of the backward, some Tensors were
                # de-allocated (which corresponds to the checkpoint storage
                # being emptied at each step)
def hook(_unused):
self.assertEqual(len(stats), idx)
torch.cuda.synchronize()
stats.append(torch.cuda.memory_allocated())
if idx > 0:
if should_free:
self.assertLess(stats[idx], stats[idx - 1])
else:
self.assertEqual(stats[idx], stats[idx - 1])
x.register_hook(hook)
def test_fn(x):
# The main property of this function is that it contains multiple
# operations that save gradients in a chain.
x = x**2
track(x, 2)
x = x**2
track(x, 1)
x = x**2
track(x, 0)
x = x**2
return x.sum()
fn(test_fn)
return stats
x = torch.zeros(10, device="cuda", requires_grad=True)
x.grad = torch.zeros_like(x)
# In a regular backward, buffers get eagerly freed
non_retain_stats = _do_test(lambda fn: fn(x).backward(), True)
# In a retain_grad backward, buffers get preserved
_unused_retain_stats = _do_test(
lambda fn: fn(x).backward(retain_graph=True), False
)
# In a regular backward with checkpoint, buffers get eagerly freed
checkpoint_non_retain_stats = _do_test(
lambda fn: checkpoint(fn, x, use_reentrant=False).backward(), True
)
# In a retain_grad backward with checkpoint, buffers get eagerly freed
checkpoint_retain_stats = _do_test(
lambda fn: checkpoint(fn, x, use_reentrant=False).backward(
retain_graph=True
),
True,
)
self.assertEqual(non_retain_stats, checkpoint_non_retain_stats)
self.assertEqual(non_retain_stats, checkpoint_retain_stats)
|
import sys
import os
import re
import shutil
import random
import subprocess
import tempfile
import traceback
import textwrap
import unittest
from typing import Any, List, Dict
import torch
import torch.nn as nn
import torch.utils.data
from torch.utils.data import DataLoader
from torch.testing._internal.common_device_type import (
ops,
onlyCPU,
instantiate_device_type_tests,
)
from torch.testing._internal.common_methods_invocations import op_db
import torch.cuda
from torch.utils._pytree import tree_any, tree_all_only
from torch.utils.checkpoint import checkpoint, checkpoint_sequential
from torch import set_default_device
from torch.utils._device import set_device
from torch.utils._traceback import report_compile_source_on_error
import torch.utils.cpp_extension
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_utils import load_tests, IS_FBCODE, IS_SANDCASTLE, IS_WINDOWS
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import TestCase, run_tests
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
|
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
import traceback
import unittest
import warnings
from typing import Any, Dict, List
import torch
import torch.cuda
import torch.nn as nn
import torch.utils.cpp_extension
import torch.utils.data
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCPU,
ops,
)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
IS_FBCODE,
IS_SANDCASTLE,
IS_WINDOWS,
load_tests,
)
from torch.utils._device import set_device
from torch.utils._pytree import tree_all_only, tree_any
from torch.utils._traceback import (
CapturedTraceback,
format_traceback_short,
report_compile_source_on_error,
)
from torch.utils.checkpoint import (
_infer_device_type,
checkpoint,
checkpoint_sequential,
get_device_states,
)
from torch.utils.data import DataLoader
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import run_tests, TestCase
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
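For orientation, here is a minimal sketch (assuming a CUDA device is available) of the non-reentrant checkpoint path whose memory behavior the test above measures: the forward stores no intermediate activations, and the backward recomputes them and then frees them eagerly unless retain_graph keeps the graph alive.

import torch
from torch.utils.checkpoint import checkpoint

def fn(x):
    # A chain of ops whose intermediates would normally be saved
    # for backward; under checkpointing they are recomputed instead.
    return (((x ** 2) ** 2) ** 2).sum()

x = torch.zeros(10, device="cuda", requires_grad=True)
checkpoint(fn, x, use_reentrant=False).backward()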
torch
|
test/test_utils.py
|
test_get_device_states_recursive
|
def test_get_device_states_recursive(self):
inp = {
"foo": torch.rand(10, device="cuda:0"),
"bar": [torch.rand(10, device="cuda:1")],
}
device_ids, device_states = get_device_states(inp)
self.assertEqual(2, len(device_ids))
self.assertEqual(2, len(device_states))
self.assertEqual(0, device_ids[0])
self.assertEqual(1, device_ids[1])
self.assertTrue(isinstance(device_states[0], torch.Tensor))
self.assertTrue(isinstance(device_states[1], torch.Tensor))
|
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
import traceback
import unittest
import warnings
from typing import Any, Dict, List
import torch
import torch.cuda
import torch.nn as nn
import torch.utils.cpp_extension
import torch.utils.data
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCPU,
ops,
)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
IS_FBCODE,
IS_SANDCASTLE,
IS_WINDOWS,
load_tests,
)
from torch.utils._device import set_device
from torch.utils._pytree import tree_all_only, tree_any
from torch.utils._traceback import (
CapturedTraceback,
format_traceback_short,
report_compile_source_on_error,
)
from torch.utils.checkpoint import (
_infer_device_type,
checkpoint,
checkpoint_sequential,
get_device_states,
)
from torch.utils.data import DataLoader
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import run_tests, TestCase
class TestCheckpoint(TestCase):
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
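As a usage sketch (assuming two CUDA devices are visible), get_device_states walks an arbitrary pytree of arguments and returns the CUDA device indices it found alongside one RNG-state tensor per device:

import torch
from torch.utils.checkpoint import get_device_states

inp = {
    "foo": torch.rand(10, device="cuda:0"),
    "bar": [torch.rand(10, device="cuda:1")],
}
device_ids, device_states = get_device_states(inp)
# device_ids    -> [0, 1]
# device_states -> [tensor(...), tensor(...)]  (one RNG state per device)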
torch
|
test/test_utils.py
|
test_infer_device_state_recursive_multi_cuda
|
class TestDataLoaderUtils(TestCase):
MAX_TIMEOUT_IN_SECOND = 300
|
def test_infer_device_state_recursive_multi_cuda(self):
        # Check that no warning is issued for either the (cuda:0, cuda:1)
        # or the (cuda:0, cuda:0) case, since all tensors share the same
        # device type
inp = {
"foo": torch.rand(10, device="cuda:0"),
"bar": [torch.rand(10, device="cuda:1")],
}
with warnings.catch_warnings():
warnings.simplefilter("error")
device_type = _infer_device_type(inp)
self.assertEqual("cuda", device_type)
inp = {
"foo": torch.rand(10, device="cuda:0"),
"bar": [torch.rand(10, device="cuda:0")],
}
with warnings.catch_warnings():
warnings.simplefilter("error")
device_type = _infer_device_type(inp)
self.assertEqual("cuda", device_type)
# Check that a warning is issued for cuda:0, meta and that it includes
# device type information
inp = {
"foo": torch.rand(10, device="cuda:0"),
"bar": [torch.rand(10, device="meta")],
}
with warnings.catch_warnings(record=True) as w:
device_type = _infer_device_type(inp)
self.assertEqual("cuda", device_type)
self.assertEqual(len(w), 1)
warning_msg = str(w[-1].message)
self.assertTrue(
"Tensor arguments, excluding CPU tensors, are detected on at least two types of devices"
in warning_msg
)
self.assertTrue("Device types: ['cuda', 'meta']" in warning_msg)
self.assertTrue("first device type: cuda" in warning_msg)
|
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
import traceback
import unittest
import warnings
from typing import Any, Dict, List
import torch
import torch.cuda
import torch.nn as nn
import torch.utils.cpp_extension
import torch.utils.data
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCPU,
ops,
)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
IS_FBCODE,
IS_SANDCASTLE,
IS_WINDOWS,
load_tests,
)
from torch.utils._device import set_device
from torch.utils._pytree import tree_all_only, tree_any
from torch.utils._traceback import (
CapturedTraceback,
format_traceback_short,
report_compile_source_on_error,
)
from torch.utils.checkpoint import (
_infer_device_type,
checkpoint,
checkpoint_sequential,
get_device_states,
)
from torch.utils.data import DataLoader
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import run_tests, TestCase
class TestCheckpoint(TestCase):
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
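A condensed sketch of the warning behavior checked above (note that _infer_device_type is a private helper, and the cuda tensor requires a CUDA build):

import warnings
import torch
from torch.utils.checkpoint import _infer_device_type

mixed = {
    "foo": torch.rand(10, device="cuda:0"),
    "bar": [torch.rand(10, device="meta")],
}
with warnings.catch_warnings(record=True) as w:
    # Two distinct non-CPU device types trigger a single warning;
    # the first type encountered ("cuda") is returned.
    device_type = _infer_device_type(mixed)
assert device_type == "cuda"
assert len(w) == 1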
torch
|
test/test_utils.py
|
_run_bottleneck
|
def _run_bottleneck(self, test_file, scriptargs=''):
curdir = os.path.dirname(os.path.abspath(__file__))
filepath = '{}/{}'.format(curdir, test_file)
if scriptargs != '':
scriptargs = ' {}'.format(scriptargs)
rc, out, err = self._run(
'{} -m torch.utils.bottleneck {}{}'.format(sys.executable, filepath, scriptargs))
return rc, out, err
|
def _run_bottleneck(self, test_file, scriptargs=""):
curdir = os.path.dirname(os.path.abspath(__file__))
filepath = f"{curdir}/{test_file}"
if scriptargs != "":
scriptargs = f" {scriptargs}"
rc, out, err = self._run(
f"{sys.executable} -m torch.utils.bottleneck {filepath}{scriptargs}"
)
return rc, out, err
|
import sys
import os
import re
import shutil
import random
import subprocess
import tempfile
import traceback
import textwrap
import unittest
from typing import Any, List, Dict
import torch
import torch.nn as nn
import torch.utils.data
from torch.utils.data import DataLoader
from torch.testing._internal.common_device_type import (
ops,
onlyCPU,
instantiate_device_type_tests,
)
from torch.testing._internal.common_methods_invocations import op_db
import torch.cuda
from torch.utils._pytree import tree_any, tree_all_only
from torch.utils.checkpoint import checkpoint, checkpoint_sequential
from torch import set_default_device
from torch.utils._device import set_device
from torch.utils._traceback import report_compile_source_on_error
import torch.utils.cpp_extension
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_utils import load_tests, IS_FBCODE, IS_SANDCASTLE, IS_WINDOWS
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import TestCase, run_tests
test_dir = os.path.abspath(os.path.dirname(str(__file__)))
@unittest.skipIf('SKIP_TEST_BOTTLENECK' in os.environ.keys(), 'SKIP_TEST_BOTTLENECK is set')
class TestBottleneck(TestCase):
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
|
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
import traceback
import unittest
import warnings
from typing import Any, Dict, List
import torch
import torch.cuda
import torch.nn as nn
import torch.utils.cpp_extension
import torch.utils.data
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCPU,
ops,
)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
IS_FBCODE,
IS_SANDCASTLE,
IS_WINDOWS,
load_tests,
)
from torch.utils._device import set_device
from torch.utils._pytree import tree_all_only, tree_any
from torch.utils._traceback import (
CapturedTraceback,
format_traceback_short,
report_compile_source_on_error,
)
from torch.utils.checkpoint import (
_infer_device_type,
checkpoint,
checkpoint_sequential,
get_device_states,
)
from torch.utils.data import DataLoader
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import run_tests, TestCase
test_dir = os.path.abspath(os.path.dirname(str(__file__)))
@unittest.skipIf(
"SKIP_TEST_BOTTLENECK" in os.environ.keys(), "SKIP_TEST_BOTTLENECK is set"
)
class TestBottleneck(TestCase):
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
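The helper above simply shells out to the bottleneck profiler; the equivalent stand-alone invocation is sketched below ("my_script.py" is a placeholder path):

import subprocess
import sys

# Profile a script end-to-end with torch.utils.bottleneck.
subprocess.run(
    [sys.executable, "-m", "torch.utils.bottleneck", "my_script.py"],
    check=True,
)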
torch
|
test/test_utils.py
|
_fail_msg
|
def _fail_msg(self, msg, output):
return '{}, output was:\n{}'.format(msg, output)
|
def _fail_msg(self, msg, output):
return f"{msg}, output was:\n{output}"
|
import sys
import os
import re
import shutil
import random
import subprocess
import tempfile
import traceback
import textwrap
import unittest
from typing import Any, List, Dict
import torch
import torch.nn as nn
import torch.utils.data
from torch.utils.data import DataLoader
from torch.testing._internal.common_device_type import (
ops,
onlyCPU,
instantiate_device_type_tests,
)
from torch.testing._internal.common_methods_invocations import op_db
import torch.cuda
from torch.utils._pytree import tree_any, tree_all_only
from torch.utils.checkpoint import checkpoint, checkpoint_sequential
from torch import set_default_device
from torch.utils._device import set_device
from torch.utils._traceback import report_compile_source_on_error
import torch.utils.cpp_extension
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_utils import load_tests, IS_FBCODE, IS_SANDCASTLE, IS_WINDOWS
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import TestCase, run_tests
test_dir = os.path.abspath(os.path.dirname(str(__file__)))
@unittest.skipIf('SKIP_TEST_BOTTLENECK' in os.environ.keys(), 'SKIP_TEST_BOTTLENECK is set')
class TestBottleneck(TestCase):
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
|
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
import traceback
import unittest
import warnings
from typing import Any, Dict, List
import torch
import torch.cuda
import torch.nn as nn
import torch.utils.cpp_extension
import torch.utils.data
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCPU,
ops,
)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
IS_FBCODE,
IS_SANDCASTLE,
IS_WINDOWS,
load_tests,
)
from torch.utils._device import set_device
from torch.utils._pytree import tree_all_only, tree_any
from torch.utils._traceback import (
CapturedTraceback,
format_traceback_short,
report_compile_source_on_error,
)
from torch.utils.checkpoint import (
_infer_device_type,
checkpoint,
checkpoint_sequential,
get_device_states,
)
from torch.utils.data import DataLoader
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import run_tests, TestCase
test_dir = os.path.abspath(os.path.dirname(str(__file__)))
@unittest.skipIf(
"SKIP_TEST_BOTTLENECK" in os.environ.keys(), "SKIP_TEST_BOTTLENECK is set"
)
class TestBottleneck(TestCase):
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_utils.py
|
test_external_module_register
|
def test_external_module_register(self):
# Built-in module
with self.assertRaisesRegex(RuntimeError, "The runtime module of"):
torch._register_device_module('cuda', torch.cuda)
# Wrong device type
with self.assertRaisesRegex(RuntimeError, "Expected one of cpu"):
torch._register_device_module('dummmy', DummyXPUModule)
with self.assertRaises(AttributeError):
torch.xpu.is_available() # type: ignore[attr-defined]
torch._register_device_module('xpu', DummyXPUModule)
torch.xpu.is_available() # type: ignore[attr-defined]
        # Overriding an already-registered module is not supported
with self.assertRaisesRegex(RuntimeError, "The runtime module of"):
torch._register_device_module('xpu', DummyXPUModule)
|
def test_external_module_register(self):
# Built-in module
with self.assertRaisesRegex(RuntimeError, "The runtime module of"):
torch._register_device_module("cuda", torch.cuda)
# Wrong device type
with self.assertRaisesRegex(RuntimeError, "Expected one of cpu"):
torch._register_device_module("dummmy", DummyPrivateUse1Module)
with self.assertRaises(AttributeError):
torch.privateuseone.is_available() # type: ignore[attr-defined]
torch._register_device_module("privateuseone", DummyPrivateUse1Module)
torch.privateuseone.is_available() # type: ignore[attr-defined]
        # Overriding an already-registered module is not supported
with self.assertRaisesRegex(RuntimeError, "The runtime module of"):
torch._register_device_module("privateuseone", DummyPrivateUse1Module)
|
import sys
import os
import re
import shutil
import random
import subprocess
import tempfile
import traceback
import textwrap
import unittest
from typing import Any, List, Dict
import torch
import torch.nn as nn
import torch.utils.data
from torch.utils.data import DataLoader
from torch.testing._internal.common_device_type import (
ops,
onlyCPU,
instantiate_device_type_tests,
)
from torch.testing._internal.common_methods_invocations import op_db
import torch.cuda
from torch.utils._pytree import tree_any, tree_all_only
from torch.utils.checkpoint import checkpoint, checkpoint_sequential
from torch import set_default_device
from torch.utils._device import set_device
from torch.utils._traceback import report_compile_source_on_error
import torch.utils.cpp_extension
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_utils import load_tests, IS_FBCODE, IS_SANDCASTLE, IS_WINDOWS
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import TestCase, run_tests
test_dir = os.path.abspath(os.path.dirname(str(__file__)))
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
class TestExtensionUtils(TestCase):
|
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
import traceback
import unittest
import warnings
from typing import Any, Dict, List
import torch
import torch.cuda
import torch.nn as nn
import torch.utils.cpp_extension
import torch.utils.data
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCPU,
ops,
)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
IS_FBCODE,
IS_SANDCASTLE,
IS_WINDOWS,
load_tests,
)
from torch.utils._device import set_device
from torch.utils._pytree import tree_all_only, tree_any
from torch.utils._traceback import (
CapturedTraceback,
format_traceback_short,
report_compile_source_on_error,
)
from torch.utils.checkpoint import (
_infer_device_type,
checkpoint,
checkpoint_sequential,
get_device_states,
)
from torch.utils.data import DataLoader
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import run_tests, TestCase
test_dir = os.path.abspath(os.path.dirname(str(__file__)))
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
class TestExtensionUtils(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_utils.py
|
test_external_module_register_with_renamed_backend
|
def test_external_module_register_with_renamed_backend(self):
torch.utils.rename_privateuse1_backend("foo")
with self.assertRaisesRegex(RuntimeError, "has already been set"):
torch.utils.rename_privateuse1_backend("dummmy")
custom_backend_name = torch._C._get_privateuse1_backend_name()
self.assertEqual(custom_backend_name, "foo")
with self.assertRaises(AttributeError):
torch.foo.is_available() # type: ignore[attr-defined]
with self.assertRaisesRegex(AssertionError, "Tried to use AMP with the"):
with torch.autocast(device_type=custom_backend_name):
pass
torch._register_device_module("foo", DummyPrivateUse1Module)
torch.foo.is_available() # type: ignore[attr-defined]
with torch.autocast(device_type=custom_backend_name):
pass
self.assertEqual(torch._utils._get_device_index("foo:1"), 1)
self.assertEqual(torch._utils._get_device_index(torch.device("foo:2")), 2)
|
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
import traceback
import unittest
import warnings
from typing import Any, Dict, List
import torch
import torch.cuda
import torch.nn as nn
import torch.utils.cpp_extension
import torch.utils.data
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCPU,
ops,
)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
IS_FBCODE,
IS_SANDCASTLE,
IS_WINDOWS,
load_tests,
)
from torch.utils._device import set_device
from torch.utils._pytree import tree_all_only, tree_any
from torch.utils._traceback import (
CapturedTraceback,
format_traceback_short,
report_compile_source_on_error,
)
from torch.utils.checkpoint import (
_infer_device_type,
checkpoint,
checkpoint_sequential,
get_device_states,
)
from torch.utils.data import DataLoader
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import run_tests, TestCase
test_dir = os.path.abspath(os.path.dirname(str(__file__)))
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
class TestExtensionUtils(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
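A condensed sketch of the renamed-backend flow exercised above. DummyPrivateUse1Module below is a stand-in (the test suite defines its own); rename_privateuse1_backend can only be called once per process:

import torch

class DummyPrivateUse1Module:
    @staticmethod
    def is_available():
        return True

torch.utils.rename_privateuse1_backend("foo")
torch._register_device_module("foo", DummyPrivateUse1Module)
assert torch.foo.is_available()  # type: ignore[attr-defined]
assert torch._utils._get_device_index("foo:1") == 1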
torch
|
test/test_utils.py
|
test_basic
|
def test_basic(self):
with torch.device('meta') as dev:
x = torch.empty(3, 3)
self.assertEqual(x.device.type, 'meta')
self.assertEqual(dev, torch.device('meta'))
|
def test_basic(self):
self.assertExpectedInline(
torch._utils.render_call(torch.sum, [torch.randn(100)], {"dim": 0}),
"""torch.sum(tensor([...], size=(100,)), dim=0)""",
)
self.assertExpectedInline(
torch._utils.render_call(torch.sum, [torch.randn(100, 100)], {"dim": 0}),
"""torch.sum(tensor([...], size=(100, 100)), dim=0)""",
)
|
import sys
import os
import re
import shutil
import random
import subprocess
import tempfile
import traceback
import textwrap
import unittest
from typing import Any, List, Dict
import torch
import torch.nn as nn
import torch.utils.data
from torch.utils.data import DataLoader
from torch.testing._internal.common_device_type import (
ops,
onlyCPU,
instantiate_device_type_tests,
)
from torch.testing._internal.common_methods_invocations import op_db
import torch.cuda
from torch.utils._pytree import tree_any, tree_all_only
from torch.utils.checkpoint import checkpoint, checkpoint_sequential
from torch import set_default_device
from torch.utils._device import set_device
from torch.utils._traceback import report_compile_source_on_error
import torch.utils.cpp_extension
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_utils import load_tests, IS_FBCODE, IS_SANDCASTLE, IS_WINDOWS
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import TestCase, run_tests
test_dir = os.path.abspath(os.path.dirname(str(__file__)))
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
class TestDeviceUtils(TestCase):
|
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
import traceback
import unittest
import warnings
from typing import Any, Dict, List
import torch
import torch.cuda
import torch.nn as nn
import torch.utils.cpp_extension
import torch.utils.data
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCPU,
ops,
)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
IS_FBCODE,
IS_SANDCASTLE,
IS_WINDOWS,
load_tests,
)
from torch.utils._device import set_device
from torch.utils._pytree import tree_all_only, tree_any
from torch.utils._traceback import (
CapturedTraceback,
format_traceback_short,
report_compile_source_on_error,
)
from torch.utils.checkpoint import (
_infer_device_type,
checkpoint,
checkpoint_sequential,
get_device_states,
)
from torch.utils.data import DataLoader
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import run_tests, TestCase
test_dir = os.path.abspath(os.path.dirname(str(__file__)))
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
class TestRenderUtils(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
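The rewritten test_basic targets torch._utils.render_call, a private helper that pretty-prints a call while abbreviating tensor contents; a brief sketch:

import torch

s = torch._utils.render_call(torch.sum, [torch.randn(100)], {"dim": 0})
print(s)  # torch.sum(tensor([...], size=(100,)), dim=0)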
torch
|
test/test_utils.py
|
test_add_and_search_trie
|
def test_add_and_search_trie(self):
self.trie.add("banana")
self.assertTrue(self.trie.search("banana"))
self.assertFalse(self.trie.search("ban"))
self.assertFalse(self.trie.search("dog"))
|
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
import traceback
import unittest
import warnings
from typing import Any, Dict, List
import torch
import torch.cuda
import torch.nn as nn
import torch.utils.cpp_extension
import torch.utils.data
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCPU,
ops,
)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
IS_FBCODE,
IS_SANDCASTLE,
IS_WINDOWS,
load_tests,
)
from torch.utils._device import set_device
from torch.utils._pytree import tree_all_only, tree_any
from torch.utils._traceback import (
CapturedTraceback,
format_traceback_short,
report_compile_source_on_error,
)
from torch.utils.checkpoint import (
_infer_device_type,
checkpoint,
checkpoint_sequential,
get_device_states,
)
from torch.utils.data import DataLoader
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import run_tests, TestCase
test_dir = os.path.abspath(os.path.dirname(str(__file__)))
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
class TestHipifyTrie(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_utils.py
|
test_add_multiple_and_search_trie
|
def test_add_multiple_and_search_trie(self):
words_to_add = ["banana", "apple", "orange"]
for word in words_to_add:
self.trie.add(word)
for word in words_to_add:
self.assertTrue(self.trie.search(word))
for word in ["ban", "dog", "okay", "app"]:
self.assertFalse(self.trie.search(word))
|
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
import traceback
import unittest
import warnings
from typing import Any, Dict, List
import torch
import torch.cuda
import torch.nn as nn
import torch.utils.cpp_extension
import torch.utils.data
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCPU,
ops,
)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
IS_FBCODE,
IS_SANDCASTLE,
IS_WINDOWS,
load_tests,
)
from torch.utils._device import set_device
from torch.utils._pytree import tree_all_only, tree_any
from torch.utils._traceback import (
CapturedTraceback,
format_traceback_short,
report_compile_source_on_error,
)
from torch.utils.checkpoint import (
_infer_device_type,
checkpoint,
checkpoint_sequential,
get_device_states,
)
from torch.utils.data import DataLoader
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import run_tests, TestCase
test_dir = os.path.abspath(os.path.dirname(str(__file__)))
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
class TestHipifyTrie(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_utils.py
|
test_quote_escape
|
def test_quote_escape(self):
orig_chars = ["*", "[", ".", "+", "a", "z", "-"]
quoted_strs = ["\\*", "\\[", "\\.", "\\+", "a", "z", "\\-"]
for i in range(len(orig_chars)):
self.assertEqual(self.trie.quote(orig_chars[i]), quoted_strs[i])
|
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
import traceback
import unittest
import warnings
from typing import Any, Dict, List
import torch
import torch.cuda
import torch.nn as nn
import torch.utils.cpp_extension
import torch.utils.data
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCPU,
ops,
)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
IS_FBCODE,
IS_SANDCASTLE,
IS_WINDOWS,
load_tests,
)
from torch.utils._device import set_device
from torch.utils._pytree import tree_all_only, tree_any
from torch.utils._traceback import (
CapturedTraceback,
format_traceback_short,
report_compile_source_on_error,
)
from torch.utils.checkpoint import (
_infer_device_type,
checkpoint,
checkpoint_sequential,
get_device_states,
)
from torch.utils.data import DataLoader
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import run_tests, TestCase
test_dir = os.path.abspath(os.path.dirname(str(__file__)))
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
class TestHipifyTrie(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_utils.py
|
test_export_trie_to_regex
|
def test_export_trie_to_regex(self):
words_to_add = [
"__CUDACC__",
"CUDA_ERROR_CONTEXT_ALREADY_CURRENT",
"CUDA_ERROR_ARRAY_IS_MAPPED",
"CUDA_ERROR_NOT_MAPPED",
"CUDA_ERROR_INVALID_SOURCE",
]
for word in words_to_add:
self.trie.add(word)
regex = self.trie.export_to_regex()
expected_regex = r"(?:CUDA_ERROR_(?:ARRAY_IS_MAPPED|CONTEXT_ALREADY_CURRENT|INVALID_SOURCE|NOT_MAPPED)|__CUDACC__)"
self.assertEqual(regex, expected_regex)
|
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
import traceback
import unittest
import warnings
from typing import Any, Dict, List
import torch
import torch.cuda
import torch.nn as nn
import torch.utils.cpp_extension
import torch.utils.data
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCPU,
ops,
)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
IS_FBCODE,
IS_SANDCASTLE,
IS_WINDOWS,
load_tests,
)
from torch.utils._device import set_device
from torch.utils._pytree import tree_all_only, tree_any
from torch.utils._traceback import (
CapturedTraceback,
format_traceback_short,
report_compile_source_on_error,
)
from torch.utils.checkpoint import (
_infer_device_type,
checkpoint,
checkpoint_sequential,
get_device_states,
)
from torch.utils.data import DataLoader
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import run_tests, TestCase
test_dir = os.path.abspath(os.path.dirname(str(__file__)))
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
class TestHipifyTrie(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_utils.py
|
test_prefix_words_export_trie_to_regex
|
def test_prefix_words_export_trie_to_regex(self):
        # Test the case where some nodes both have children and are leaf nodes.
words_to_add = ["apple", "app", "ban", "banana"]
for word in words_to_add:
self.trie.add(word)
regex = self.trie.export_to_regex()
expected_regex = r"(?:app(?:le)?|ban(?:ana)?)"
self.assertEqual(regex, expected_regex)
|
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
import traceback
import unittest
import warnings
from typing import Any, Dict, List
import torch
import torch.cuda
import torch.nn as nn
import torch.utils.cpp_extension
import torch.utils.data
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCPU,
ops,
)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
IS_FBCODE,
IS_SANDCASTLE,
IS_WINDOWS,
load_tests,
)
from torch.utils._device import set_device
from torch.utils._pytree import tree_all_only, tree_any
from torch.utils._traceback import (
CapturedTraceback,
format_traceback_short,
report_compile_source_on_error,
)
from torch.utils.checkpoint import (
_infer_device_type,
checkpoint,
checkpoint_sequential,
get_device_states,
)
from torch.utils.data import DataLoader
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import run_tests, TestCase
test_dir = os.path.abspath(os.path.dirname(str(__file__)))
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
class TestHipifyTrie(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
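Taken together, the Trie tests imply the usage pattern sketched below (Trie lives in torch.utils.hipify.hipify_python; shared prefixes collapse into optional regex groups):

from torch.utils.hipify.hipify_python import Trie

trie = Trie()
for word in ["apple", "app", "ban", "banana"]:
    trie.add(word)

assert trie.search("apple")
assert not trie.search("appl")
assert trie.export_to_regex() == r"(?:app(?:le)?|ban(?:ana)?)"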
torch
|
test/test_utils.py
|
test_single_export_trie_to_regex
|
def test_single_export_trie_to_regex(self):
words_to_add = ["cudaErrorInvalidMemcpyDirection"]
for word in words_to_add:
self.trie.add(word)
regex = self.trie.export_to_regex()
expected_regex = "cudaErrorInvalidMemcpyDirection"
self.assertEqual(regex, expected_regex)
|
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
import traceback
import unittest
import warnings
from typing import Any, Dict, List
import torch
import torch.cuda
import torch.nn as nn
import torch.utils.cpp_extension
import torch.utils.data
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCPU,
ops,
)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
IS_FBCODE,
IS_SANDCASTLE,
IS_WINDOWS,
load_tests,
)
from torch.utils._device import set_device
from torch.utils._pytree import tree_all_only, tree_any
from torch.utils._traceback import (
CapturedTraceback,
format_traceback_short,
report_compile_source_on_error,
)
from torch.utils.checkpoint import (
_infer_device_type,
checkpoint,
checkpoint_sequential,
get_device_states,
)
from torch.utils.data import DataLoader
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import run_tests, TestCase
test_dir = os.path.abspath(os.path.dirname(str(__file__)))
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
class TestHipifyTrie(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_utils.py
|
test_char_export_trie_to_regex
|
def test_char_export_trie_to_regex(self):
self.trie.add("a")
self.assertEqual(self.trie.export_to_regex(), "a")
self.trie.add("b")
self.assertEqual(self.trie.export_to_regex(), "[ab]")
|
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
import traceback
import unittest
import warnings
from typing import Any, Dict, List
import torch
import torch.cuda
import torch.nn as nn
import torch.utils.cpp_extension
import torch.utils.data
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCPU,
ops,
)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
IS_FBCODE,
IS_SANDCASTLE,
IS_WINDOWS,
load_tests,
)
from torch.utils._device import set_device
from torch.utils._pytree import tree_all_only, tree_any
from torch.utils._traceback import (
CapturedTraceback,
format_traceback_short,
report_compile_source_on_error,
)
from torch.utils.checkpoint import (
_infer_device_type,
checkpoint,
checkpoint_sequential,
get_device_states,
)
from torch.utils.data import DataLoader
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import run_tests, TestCase
test_dir = os.path.abspath(os.path.dirname(str(__file__)))
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
class TestHipifyTrie(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_utils.py
|
test_device_mode_ops
|
def test_device_mode_ops(self, device, dtype, op):
func = op.get_op()
samples = op.sample_inputs(device, dtype, requires_grad=False)
for sample in samples:
# Only test samples which don't have Tensor inputs. However,
# we don't test the factory property on OpInfo as it is very,
# very incomplete
if tree_any(
lambda x: isinstance(x, torch.Tensor),
(sample.input, sample.args, sample.kwargs)
):
continue
# Many OpInfos will explicitly pass in a device. DeviceContext
# will respect device if it is explicitly specified. To test
# DeviceContext, we have to remove the device kwarg in this case.
# NB: Can't pass None to sample_inputs, the function can't
# handle it.
kwargs = sample.kwargs.copy()
kwargs.pop('device', None)
with torch.device('meta'):
r = func(sample.input, *sample.args, **kwargs)
self.assertTrue(
tree_all_only(torch.Tensor, lambda x: x.device.type == 'meta', r)
)
|
def test_device_mode_ops(self, device, dtype, op):
func = op.get_op()
samples = op.sample_inputs(device, dtype, requires_grad=False)
for sample in samples:
# Only test samples which don't have Tensor inputs. However,
# we don't test the factory property on OpInfo as it is very,
# very incomplete
if tree_any(
lambda x: isinstance(x, torch.Tensor),
(sample.input, sample.args, sample.kwargs),
):
continue
# Many OpInfos will explicitly pass in a device. DeviceContext
# will respect device if it is explicitly specified. To test
# DeviceContext, we have to remove the device kwarg in this case.
# NB: Can't pass None to sample_inputs, the function can't
# handle it.
kwargs = sample.kwargs.copy()
kwargs.pop("device", None)
with torch.device("meta"):
r = func(sample.input, *sample.args, **kwargs)
def is_meta_device(x: torch.Tensor) -> bool:
return x.device.type == "meta"
self.assertTrue(tree_all_only(torch.Tensor, is_meta_device, r))
|
import sys
import os
import re
import shutil
import random
import subprocess
import tempfile
import traceback
import textwrap
import unittest
from typing import Any, List, Dict
import torch
import torch.nn as nn
import torch.utils.data
from torch.utils.data import DataLoader
from torch.testing._internal.common_device_type import (
ops,
onlyCPU,
instantiate_device_type_tests,
)
from torch.testing._internal.common_methods_invocations import op_db
import torch.cuda
from torch.utils._pytree import tree_any, tree_all_only
from torch.utils.checkpoint import checkpoint, checkpoint_sequential
from torch import set_default_device
from torch.utils._device import set_device
from torch.utils._traceback import report_compile_source_on_error
import torch.utils.cpp_extension
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_utils import load_tests, IS_FBCODE, IS_SANDCASTLE, IS_WINDOWS
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import TestCase, run_tests
test_dir = os.path.abspath(os.path.dirname(str(__file__)))
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
class TestDeviceUtils(TestCase):
|
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
import traceback
import unittest
import warnings
from typing import Any, Dict, List
import torch
import torch.cuda
import torch.nn as nn
import torch.utils.cpp_extension
import torch.utils.data
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCPU,
ops,
)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
IS_FBCODE,
IS_SANDCASTLE,
IS_WINDOWS,
load_tests,
)
from torch.utils._device import set_device
from torch.utils._pytree import tree_all_only, tree_any
from torch.utils._traceback import (
CapturedTraceback,
format_traceback_short,
report_compile_source_on_error,
)
from torch.utils.checkpoint import (
_infer_device_type,
checkpoint,
checkpoint_sequential,
get_device_states,
)
from torch.utils.data import DataLoader
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import run_tests, TestCase
test_dir = os.path.abspath(os.path.dirname(str(__file__)))
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
class TestDeviceUtils(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
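For context, the DeviceContext behavior this test relies on can be seen in isolation (a minimal sketch; the second block restates the comment above that an explicit device= always wins over the ambient context):

import torch

with torch.device("meta"):
    # Factory calls with no explicit device= inherit the context device.
    x = torch.empty(3, 3)
assert x.device.type == "meta"

with torch.device("meta"):
    y = torch.empty(2, device="cpu")
assert y.device.type == "cpu"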
torch
|
test/test_utils.py
|
test_format_traceback_short
|
def test_format_traceback_short(self):
try:
raise RuntimeError
except RuntimeError as e:
self.assertRegex(
format_traceback_short(e.__traceback__),
r".*test_utils.py:\d+ in test_format_traceback_short",
)
|
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
import traceback
import unittest
import warnings
from typing import Any, Dict, List
import torch
import torch.cuda
import torch.nn as nn
import torch.utils.cpp_extension
import torch.utils.data
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCPU,
ops,
)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
IS_FBCODE,
IS_SANDCASTLE,
IS_WINDOWS,
load_tests,
)
from torch.utils._device import set_device
from torch.utils._pytree import tree_all_only, tree_any
from torch.utils._traceback import (
CapturedTraceback,
format_traceback_short,
report_compile_source_on_error,
)
from torch.utils.checkpoint import (
_infer_device_type,
checkpoint,
checkpoint_sequential,
get_device_states,
)
from torch.utils.data import DataLoader
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import run_tests, TestCase
test_dir = os.path.abspath(os.path.dirname(str(__file__)))
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
class TestTraceback(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_utils.py
|
test_captured_traceback
|
def test_captured_traceback(self):
self.assertIn(
"test_captured_traceback", "".join(CapturedTraceback.extract().format())
)
|
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
import traceback
import unittest
import warnings
from typing import Any, Dict, List
import torch
import torch.cuda
import torch.nn as nn
import torch.utils.cpp_extension
import torch.utils.data
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCPU,
ops,
)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
IS_FBCODE,
IS_SANDCASTLE,
IS_WINDOWS,
load_tests,
)
from torch.utils._device import set_device
from torch.utils._pytree import tree_all_only, tree_any
from torch.utils._traceback import (
CapturedTraceback,
format_traceback_short,
report_compile_source_on_error,
)
from torch.utils.checkpoint import (
_infer_device_type,
checkpoint,
checkpoint_sequential,
get_device_states,
)
from torch.utils.data import DataLoader
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import run_tests, TestCase
test_dir = os.path.abspath(os.path.dirname(str(__file__)))
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
class TestTraceback(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
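Note: test_captured_traceback pins down the basic CapturedTraceback contract: extract() snapshots the current Python stack and format() renders it as a list of strings. A hedged sketch built only from the calls the test makes:

from torch.utils._traceback import CapturedTraceback

def capture_here():
    return CapturedTraceback.extract()

tb = capture_here()
# format() yields a list of traceback lines; the capturing frame
# ("capture_here") shows up in the joined rendering.
print("".join(tb.format()))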
torch
|
test/test_utils.py
|
test_captured_traceback_format_all
|
def test_captured_traceback_format_all(self):
rs = CapturedTraceback.format_all(
[CapturedTraceback.extract(), CapturedTraceback.extract()]
)
self.assertEqual(len(rs), 2)
self.assertIn("test_captured_traceback_format_all", "".join(rs[0]))
|
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
import traceback
import unittest
import warnings
from typing import Any, Dict, List
import torch
import torch.cuda
import torch.nn as nn
import torch.utils.cpp_extension
import torch.utils.data
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCPU,
ops,
)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
IS_FBCODE,
IS_SANDCASTLE,
IS_WINDOWS,
load_tests,
)
from torch.utils._device import set_device
from torch.utils._pytree import tree_all_only, tree_any
from torch.utils._traceback import (
CapturedTraceback,
format_traceback_short,
report_compile_source_on_error,
)
from torch.utils.checkpoint import (
_infer_device_type,
checkpoint,
checkpoint_sequential,
get_device_states,
)
from torch.utils.data import DataLoader
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import run_tests, TestCase
test_dir = os.path.abspath(os.path.dirname(str(__file__)))
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
class TestTraceback(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
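Note: format_all batches the rendering step: it takes several captured tracebacks and returns one list of formatted lines per input, which is what the len(rs) assertion in the test checks. A sketch assuming nothing beyond the test's own usage:

from torch.utils._traceback import CapturedTraceback

tbs = [CapturedTraceback.extract(), CapturedTraceback.extract()]
rs = CapturedTraceback.format_all(tbs)
assert len(rs) == len(tbs)  # one list of lines per captured traceback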
torch
|
test/test_utils.py
|
test_captured_traceback_format_all_cached
|
if __name__ == '__main__':
run_tests()
|
def test_captured_traceback_format_all_cached(self):
tb = CapturedTraceback.extract()
tb.format() # cached
rs = CapturedTraceback.format_all([tb, CapturedTraceback.extract()])
self.assertEqual(len(rs), 2)
self.assertIn("test_captured_traceback_format_all", "".join(rs[0]))
|
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
import traceback
import unittest
import warnings
from typing import Any, Dict, List
import torch
import torch.cuda
import torch.nn as nn
import torch.utils.cpp_extension
import torch.utils.data
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyCPU,
ops,
)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
IS_FBCODE,
IS_SANDCASTLE,
IS_WINDOWS,
load_tests,
)
from torch.utils._device import set_device
from torch.utils._pytree import tree_all_only, tree_any
from torch.utils._traceback import (
CapturedTraceback,
format_traceback_short,
report_compile_source_on_error,
)
from torch.utils.checkpoint import (
_infer_device_type,
checkpoint,
checkpoint_sequential,
get_device_states,
)
from torch.utils.data import DataLoader
load_tests = load_tests
HAS_CUDA = torch.cuda.is_available()
from torch.testing._internal.common_utils import run_tests, TestCase
test_dir = os.path.abspath(os.path.dirname(str(__file__)))
import subprocess
from torch.utils.collect_env import get_pretty_env_info
from torch.utils.hipify import hipify_python # noqa: F401
class TestTraceback(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
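Note: the "# cached" comment in the test above is the point of this record: calling tb.format() once stores the rendered frames on the object, and a subsequent format_all must serve that traceback from the cache while still symbolizing the freshly extracted one. A sketch of the behavior under test (the reuse-verbatim assertion is an assumption, not spelled out by the test):

from torch.utils._traceback import CapturedTraceback

tb = CapturedTraceback.extract()
first = tb.format()  # formats and caches the rendering
rs = CapturedTraceback.format_all([tb, CapturedTraceback.extract()])
assert len(rs) == 2
assert rs[0] == first  # assumption: the cached rendering is reused as-is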
torch
|
test/test_utils_internal.py
|
test_justknob_config
|
def test_justknob_config(self):
with self.subTest("Returns True"):
a = JustKnobsConfig()
self.assertTrue(a.get())
with self.subTest("Returns False"):
a = JustKnobsConfig(name="fake_name", default=False)
self.assertFalse(a.get())
with self.subTest("Returns True via config"):
a = JustKnobsConfig(name="fake_name", default=False)
a.set(True)
self.assertTrue(a.get())
with self.subTest("Returns True via env"):
os.environ["FAKE_FEATURE"] = "1"
a = JustKnobsConfig(
name="fake_name", env_name="FAKE_FEATURE", default=False
)
self.assertTrue(a.get())
with self.subTest("Returns same value consistently"):
a = JustKnobsConfig(name="fake_name", default=False)
a.set(True)
self.assertTrue(a.get())
a.set(False)
self.assertTrue(a.get())
with self.subTest("Checks __bool__"):
a = JustKnobsConfig(name="fake_name", default=False)
if a:
raise RuntimeError("Should not be true")
self.assertFalse(a)
|
import os
from torch._utils_internal import justknobs_feature, JustKnobsConfig
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
load_tests,
)
load_tests = load_tests
from torch.testing._internal.common_utils import run_tests, TestCase
class TestJustKnob(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
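Note: test_justknob_config fixes JustKnobsConfig's resolution order: an explicit set() wins over the default, a configured env variable is consulted, and once get() has returned a value it stays frozen for later calls. A sketch restating those rules with the same constructor arguments the test uses:

import os
from torch._utils_internal import JustKnobsConfig

a = JustKnobsConfig(name="fake_name", default=False)
a.set(True)
assert a.get()
a.set(False)
assert a.get()  # the first get() pinned the value; later set() calls are ignored

os.environ["FAKE_FEATURE"] = "1"
b = JustKnobsConfig(name="fake_name", env_name="FAKE_FEATURE", default=False)
assert b.get()  # the env variable overrides the default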
torch
|
test/test_utils_internal.py
|
test_justknob_feature
|
def test_justknob_feature(self):
with self.subTest("OSS is True"):
self.assertTrue(justknobs_feature("testname"))
with self.subTest("OSS default=True"):
self.assertTrue(justknobs_feature("testname", default=True))
with self.subTest("OSS default=False"):
self.assertFalse(justknobs_feature("testname", default=False))
with self.subTest("OSS config=True, default=False"):
self.assertTrue(
justknobs_feature("testname", config_value=True, default=False)
)
with self.subTest("OSS config=None, default=False"):
self.assertFalse(
justknobs_feature("testname", config_value=None, default=False)
)
with self.subTest("OSS config=False, default=True"):
self.assertFalse(
justknobs_feature("testname", config_value=False, default=True)
)
with self.subTest("OSS env is missing, config=False, default=True"):
self.assertFalse(
justknobs_feature(
"testname", config_value=False, env_name="NOTDEFINED", default=False
)
)
with self.subTest("OSS env is missing, default=False"):
self.assertFalse(
justknobs_feature("testname", env_name="NOTDEFINED", default=False)
)
with self.subTest(
"OSS config overrides env, config=True, env=False, default=False"
):
os.environ["FEATURE_ENV"] = "0"
self.assertTrue(
justknobs_feature(
"testname",
config_value=True,
env_name="FEATURE_ENV",
default=False,
)
)
with self.subTest("OSS env overrides default, , default=False"):
os.environ["FEATURE_ENV"] = "1"
self.assertTrue(
justknobs_feature("testname", env_name="FEATURE_ENV", default=False)
)
with self.subTest("OSS env truthy, config=False, default=False"):
os.environ["FEATURE_ENV"] = "1"
self.assertTrue(
justknobs_feature(
"testname",
env_name="FEATURE_ENV",
default=False,
)
)
os.environ["FEATURE_ENV"] = "true"
self.assertTrue(
justknobs_feature(
"testname",
env_name="FEATURE_ENV",
default=False,
)
)
os.environ["FEATURE_ENV"] = "TRUE"
self.assertTrue(
justknobs_feature(
"testname",
env_name="FEATURE_ENV",
default=False,
)
)
os.environ["FEATURE_ENV"] = "very weird true"
self.assertTrue(
justknobs_feature(
"testname",
env_name="FEATURE_ENV",
default=False,
)
)
with self.subTest("OSS env false, default=True"):
os.environ["FEATURE_ENV"] = "0"
self.assertFalse(
justknobs_feature("testname", env_name="FEATURE_ENV", default=True)
)
os.environ["FEATURE_ENV"] = "false"
self.assertFalse(
justknobs_feature("testname", env_name="FEATURE_ENV", default=True)
)
os.environ["FEATURE_ENV"] = "FALSE"
self.assertFalse(
justknobs_feature("testname", env_name="FEATURE_ENV", default=True)
)
|
import os
from torch._utils_internal import justknobs_feature, JustKnobsConfig
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
load_tests,
)
load_tests = load_tests
from torch.testing._internal.common_utils import run_tests, TestCase
class TestJustKnob(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
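Note: in OSS builds justknobs_feature resolves config_value first, then the env variable, then the default, and the test nails down the env parsing: "0", "false" and "FALSE" are falsy, while any other non-empty string (even "very weird true") counts as true. A sketch of just the env rule:

import os
from torch._utils_internal import justknobs_feature

for truthy in ("1", "true", "TRUE", "very weird true"):
    os.environ["FEATURE_ENV"] = truthy
    assert justknobs_feature("testname", env_name="FEATURE_ENV", default=False)

for falsy in ("0", "false", "FALSE"):
    os.environ["FEATURE_ENV"] = falsy
    assert not justknobs_feature("testname", env_name="FEATURE_ENV", default=True)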
torch
|
test/test_vulkan.py
|
test_conv
|
def test_conv(self):
# Conv params
batch_size = 2
input_channels_per_group = 6
height = 16
width = 16
output_channels_per_group = 6
groups = 4
kernel_h = kernel_w = 3
stride_h = stride_w = 1
pad_h = pad_w = 1
dilation = 1
input_channels = input_channels_per_group * groups
output_channels = output_channels_per_group * groups
kernels = (kernel_h, kernel_w)
strides = (stride_h, stride_w)
paddings = (pad_h, pad_w)
dilations = (dilation, dilation)
conv_weight_shape = (output_channels, input_channels_per_group, kernel_h, kernel_w)
conv_bias_shape = (output_channels)
class Conv2D(torch.nn.Module):
def __init__(self):
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False)
self.bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False)
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
def forward(self, x):
return F.conv2d(x, self.weight, self.bias,
self.strides, self.paddings, self.dilations, self.groups)
data_shape = (batch_size, input_channels, height, width)
pattern_count_map = {"Tensor = aten::conv2d": -1,
"vulkan_prepack::conv2d_clamp_prepack": 1,
"vulkan_prepack::conv2d_clamp_run": 1}
TestVulkanRewritePass.validate_transformed_module(Conv2D(), pattern_count_map, data_shape)
class Conv2DRelu(torch.nn.Module):
def __init__(self):
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False)
self.bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False)
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
def forward(self, x):
o = F.conv2d(x, self.weight, self.bias,
self.strides, self.paddings, self.dilations, self.groups)
o = F.relu(o)
return o
data_shape = (batch_size, input_channels, height, width)
pattern_count_map = {"Tensor = aten::conv2d": -1,
"vulkan_prepack::conv2d_clamp_prepack": 1,
"vulkan_prepack::conv2d_clamp_run": 1}
TestVulkanRewritePass.validate_transformed_module(
Conv2DRelu(), pattern_count_map, data_shape)
pattern_count_map["aten::relu"] = 1
pattern_count_map["vulkan_prepack::conv2d_clamp_prepack"] = -1
TestVulkanRewritePass.validate_transformed_module(
Conv2DRelu(),
pattern_count_map,
data_shape,
prepack_removal=True)
pattern_count_map["aten::relu"] = -1
TestVulkanRewritePass.validate_transformed_module(
Conv2DRelu(),
pattern_count_map,
data_shape,
prepack_removal=True,
fuse_clamping_ops=True)
class Conv2DHardtanh(torch.nn.Module):
def __init__(self):
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False)
self.bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False)
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
def forward(self, x):
o = F.conv2d(x, self.weight, self.bias,
self.strides, self.paddings, self.dilations, self.groups)
o = F.hardtanh(o)
return o
data_shape = (batch_size, input_channels, height, width)
pattern_count_map = {"Tensor = aten::conv2d": -1,
"vulkan_prepack::conv2d_clamp_prepack": 1,
"vulkan_prepack::conv2d_clamp_run": 1}
TestVulkanRewritePass.validate_transformed_module(Conv2DHardtanh(), pattern_count_map, data_shape)
pattern_count_map["aten::hardtanh"] = 1
pattern_count_map["vulkan_prepack::conv2d_clamp_prepack"] = -1
TestVulkanRewritePass.validate_transformed_module(
Conv2DHardtanh(),
pattern_count_map,
data_shape,
prepack_removal=True)
pattern_count_map["aten::hardtanh"] = -1
TestVulkanRewritePass.validate_transformed_module(
Conv2DRelu(),
pattern_count_map,
data_shape,
prepack_removal=True,
fuse_clamping_ops=True)
|
def test_conv(self):
# Conv params
batch_size = 2
input_channels_per_group = 6
height = 16
width = 16
output_channels_per_group = 6
groups = 4
kernel_h = kernel_w = 3
stride_h = stride_w = 1
pad_h = pad_w = 1
dilation = 1
input_channels = input_channels_per_group * groups
output_channels = output_channels_per_group * groups
kernels = (kernel_h, kernel_w)
strides = (stride_h, stride_w)
paddings = (pad_h, pad_w)
dilations = (dilation, dilation)
conv_weight_shape = (output_channels, input_channels_per_group, kernel_h, kernel_w)
conv_bias_shape = (output_channels)
class Conv2D(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False)
self.bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False)
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
def forward(self, x):
return F.conv2d(x, self.weight, self.bias,
self.strides, self.paddings, self.dilations, self.groups)
data_shape = (batch_size, input_channels, height, width)
pattern_count_map = {"Tensor = aten::conv2d": -1,
"vulkan_prepack::conv2d_clamp_prepack": 1,
"vulkan_prepack::conv2d_clamp_run": 1}
TestVulkanRewritePass.validate_transformed_module(Conv2D(), pattern_count_map, data_shape)
class Conv2DRelu(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False)
self.bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False)
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
def forward(self, x):
o = F.conv2d(x, self.weight, self.bias,
self.strides, self.paddings, self.dilations, self.groups)
o = F.relu(o)
return o
data_shape = (batch_size, input_channels, height, width)
pattern_count_map = {"Tensor = aten::conv2d": -1,
"vulkan_prepack::conv2d_clamp_prepack": 1,
"vulkan_prepack::conv2d_clamp_run": 1}
TestVulkanRewritePass.validate_transformed_module(
Conv2DRelu(), pattern_count_map, data_shape)
pattern_count_map["aten::relu"] = 1
pattern_count_map["vulkan_prepack::conv2d_clamp_prepack"] = -1
TestVulkanRewritePass.validate_transformed_module(
Conv2DRelu(),
pattern_count_map,
data_shape,
prepack_removal=True)
pattern_count_map["aten::relu"] = -1
TestVulkanRewritePass.validate_transformed_module(
Conv2DRelu(),
pattern_count_map,
data_shape,
prepack_removal=True,
fuse_clamping_ops=True)
class Conv2DHardtanh(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False)
self.bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False)
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
def forward(self, x):
o = F.conv2d(x, self.weight, self.bias,
self.strides, self.paddings, self.dilations, self.groups)
o = F.hardtanh(o)
return o
data_shape = (batch_size, input_channels, height, width)
pattern_count_map = {"Tensor = aten::conv2d": -1,
"vulkan_prepack::conv2d_clamp_prepack": 1,
"vulkan_prepack::conv2d_clamp_run": 1}
TestVulkanRewritePass.validate_transformed_module(Conv2DHardtanh(), pattern_count_map, data_shape)
pattern_count_map["aten::hardtanh"] = 1
pattern_count_map["vulkan_prepack::conv2d_clamp_prepack"] = -1
TestVulkanRewritePass.validate_transformed_module(
Conv2DHardtanh(),
pattern_count_map,
data_shape,
prepack_removal=True)
pattern_count_map["aten::hardtanh"] = -1
TestVulkanRewritePass.validate_transformed_module(
Conv2DRelu(),
pattern_count_map,
data_shape,
prepack_removal=True,
fuse_clamping_ops=True)
|
import unittest
import torch
from torch.nn import functional as F
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing import FileCheck
import io
@unittest.skipUnless(torch.is_vulkan_available(),
"Vulkan backend must be available for these tests.")
class TestVulkanRewritePass(TestCase):
|
import unittest
import torch
from torch.nn import functional as F
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing import FileCheck
import io
@unittest.skipUnless(torch.is_vulkan_available(),
"Vulkan backend must be available for these tests.")
class TestVulkanRewritePass(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
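Note: the before/after pair above differs only in the constructor signatures of the three inline modules; every plain "def __init__(self):" gains an explicit return annotation, with no behavioral change:

def __init__(self) -> None:  # was: def __init__(self):
    super().__init__()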
torch
|
test/test_vulkan.py
|
__init__
|
def __init__(self):
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False)
self.bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False)
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
|
def __init__(self) -> None:
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False)
self.bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False)
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
|
import unittest
import torch
from torch.nn import functional as F
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing import FileCheck
import io
class Conv2D(torch.nn.Module):
|
import unittest
import torch
from torch.nn import functional as F
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing import FileCheck
import io
class Conv2D(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_vulkan.py
|
__init__
|
def __init__(self):
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False)
self.bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False)
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
|
def __init__(self) -> None:
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False)
self.bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False)
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
|
import unittest
import torch
from torch.nn import functional as F
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing import FileCheck
import io
class Conv2D(torch.nn.Module):
|
import unittest
import torch
from torch.nn import functional as F
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing import FileCheck
import io
class Conv2D(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_vulkan.py
|
__init__
|
def __init__(self):
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False)
self.bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False)
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
|
def __init__(self) -> None:
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False)
self.bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False)
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
|
import unittest
import torch
from torch.nn import functional as F
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing import FileCheck
import io
class Conv2D(torch.nn.Module):
|
import unittest
import torch
from torch.nn import functional as F
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing import FileCheck
import io
class Conv2D(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_weak.py
|
C
|
def C():
return torch.randn(1)
# These tests are ported from cpython/Lib/test/test_weakref.py,
# but adapted to use tensor rather than object
class WeakTest(TestCase):
COUNT = 10
def test_make_weak_keyed_dict_from_dict(self):
o = torch.randn(2)
dict = WeakIdKeyDictionary({o: 364})
self.assertEqual(dict[o], 364)
def test_make_weak_keyed_dict_from_weak_keyed_dict(self):
o = torch.randn(3)
dict = WeakIdKeyDictionary({o: 364})
dict2 = WeakIdKeyDictionary(dict)
self.assertEqual(dict[o], 364)
def check_popitem(self, klass, key1, value1, key2, value2):
weakdict = klass()
weakdict[key1] = value1
weakdict[key2] = value2
self.assertEqual(len(weakdict), 2)
k, v = weakdict.popitem()
self.assertEqual(len(weakdict), 1)
if k is key1:
self.assertIs(v, value1)
else:
self.assertIs(v, value2)
k, v = weakdict.popitem()
self.assertEqual(len(weakdict), 0)
if k is key1:
self.assertIs(v, value1)
else:
self.assertIs(v, value2)
def test_weak_keyed_dict_popitem(self):
self.check_popitem(WeakIdKeyDictionary, C(), "value 1", C(), "value 2")
def check_setdefault(self, klass, key, value1, value2):
self.assertIsNot(
value1,
value2,
"invalid test" " -- value parameters must be distinct objects",
)
weakdict = klass()
o = weakdict.setdefault(key, value1)
self.assertIs(o, value1)
self.assertIn(key, weakdict)
self.assertIs(weakdict.get(key), value1)
self.assertIs(weakdict[key], value1)
o = weakdict.setdefault(key, value2)
self.assertIs(o, value1)
self.assertIn(key, weakdict)
self.assertIs(weakdict.get(key), value1)
self.assertIs(weakdict[key], value1)
def test_weak_keyed_dict_setdefault(self):
self.check_setdefault(WeakIdKeyDictionary, C(), "value 1", "value 2")
def check_update(self, klass, dict):
#
# This exercises d.update(), len(d), d.keys(), k in d,
# d.get(), d[].
#
weakdict = klass()
weakdict.update(dict)
self.assertEqual(len(weakdict), len(dict))
for k in weakdict.keys():
self.assertIn(k, dict, "mysterious new key appeared in weak dict")
v = dict.get(k)
self.assertIs(v, weakdict[k])
self.assertIs(v, weakdict.get(k))
for k in dict.keys():
self.assertIn(k, weakdict, "original key disappeared in weak dict")
v = dict[k]
self.assertIs(v, weakdict[k])
self.assertIs(v, weakdict.get(k))
def test_weak_keyed_dict_update(self):
self.check_update(WeakIdKeyDictionary, {C(): 1, C(): 2, C(): 3})
def test_weak_keyed_delitem(self):
d = WeakIdKeyDictionary()
o1 = torch.randn(1)
o2 = torch.randn(2)
d[o1] = "something"
d[o2] = "something"
self.assertEqual(len(d), 2)
del d[o1]
self.assertEqual(len(d), 1)
self.assertEqual(list(d.keys()), [o2])
def test_weak_keyed_union_operators(self):
try:
{} | {}
except TypeError:
self.skipTest("dict union not supported in this Python")
o1 = C()
o2 = C()
o3 = C()
wkd1 = WeakIdKeyDictionary({o1: 1, o2: 2})
wkd2 = WeakIdKeyDictionary({o3: 3, o1: 4})
wkd3 = wkd1.copy()
d1 = {o2: "5", o3: "6"}
pairs = [(o2, 7), (o3, 8)]
tmp1 = wkd1 | wkd2 # Between two WeakKeyDictionaries
self.assertEqual(dict(tmp1), dict(wkd1) | dict(wkd2))
self.assertIs(type(tmp1), WeakIdKeyDictionary)
wkd1 |= wkd2
self.assertEqual(wkd1, tmp1)
tmp2 = wkd2 | d1 # Between WeakKeyDictionary and mapping
self.assertEqual(dict(tmp2), dict(wkd2) | d1)
self.assertIs(type(tmp2), WeakIdKeyDictionary)
wkd2 |= d1
self.assertEqual(wkd2, tmp2)
tmp3 = wkd3.copy() # Between WeakKeyDictionary and iterable key, value
tmp3 |= pairs
self.assertEqual(dict(tmp3), dict(wkd3) | dict(pairs))
self.assertIs(type(tmp3), WeakIdKeyDictionary)
tmp4 = d1 | wkd3 # Testing .__ror__
self.assertEqual(dict(tmp4), d1 | dict(wkd3))
self.assertIs(type(tmp4), WeakIdKeyDictionary)
del o1
self.assertNotIn(4, tmp1.values())
self.assertNotIn(4, tmp2.values())
self.assertNotIn(1, tmp3.values())
self.assertNotIn(1, tmp4.values())
def test_weak_keyed_bad_delitem(self):
d = WeakIdKeyDictionary()
o = torch.randn(1)
# An attempt to delete an object that isn't there should raise
# KeyError. It didn't before 2.3.
self.assertRaises(KeyError, d.__delitem__, o)
self.assertRaises(KeyError, d.__getitem__, o)
# If a key isn't of a weakly referencable type, __getitem__ and
# __setitem__ raise TypeError. __delitem__ should too.
self.assertRaises(TypeError, d.__delitem__, 13)
self.assertRaises(TypeError, d.__getitem__, 13)
self.assertRaises(TypeError, d.__setitem__, 13, 13)
def test_make_weak_keyed_dict_repr(self):
dict = WeakIdKeyDictionary()
self.assertRegex(repr(dict), "<WeakIdKeyDictionary at 0x.*>")
def check_threaded_weak_dict_copy(self, type_, deepcopy):
# `deepcopy` should be either True or False.
exc = []
# Cannot give these slots as weakrefs weren't supported
# on these objects until later versions of Python
class DummyKey: # noqa: B903
def __init__(self, ctr):
self.ctr = ctr
class DummyValue: # noqa: B903
def __init__(self, ctr):
self.ctr = ctr
def dict_copy(d, exc):
try:
if deepcopy is True:
_ = copy.deepcopy(d)
else:
_ = d.copy()
except Exception as ex:
exc.append(ex)
def pop_and_collect(lst):
gc_ctr = 0
while lst:
i = random.randint(0, len(lst) - 1)
gc_ctr += 1
lst.pop(i)
if gc_ctr % 10000 == 0:
gc.collect() # just in case
d = type_()
keys = []
values = []
# Initialize d with many entries
for i in range(70000):
k, v = DummyKey(i), DummyValue(i)
keys.append(k)
values.append(v)
d[k] = v
del k
del v
t_copy = threading.Thread(
target=dict_copy,
args=(
d,
exc,
),
)
t_collect = threading.Thread(target=pop_and_collect, args=(keys,))
t_copy.start()
t_collect.start()
t_copy.join()
t_collect.join()
# Test exceptions
if exc:
raise exc[0]
def test_threaded_weak_key_dict_copy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(WeakIdKeyDictionary, False)
def test_threaded_weak_key_dict_deepcopy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(WeakIdKeyDictionary, True)
# Adapted from cpython/Lib/test/mapping_tests.py
class WeakKeyDictionaryTestCase(TestCase):
__ref = {torch.randn(1): 1, torch.randn(2): 2, torch.randn(3): 3}
type2test = WeakIdKeyDictionary
def _reference(self):
return self.__ref.copy()
def _empty_mapping(self):
"""Return an empty mapping object"""
return self.type2test()
def _full_mapping(self, data):
"""Return a mapping object with the value contained in data
dictionary"""
x = self._empty_mapping()
for key, value in data.items():
x[key] = value
return x
def __init__(self, *args, **kw):
unittest.TestCase.__init__(self, *args, **kw)
self.reference = self._reference().copy()
# A (key, value) pair not in the mapping
key, value = self.reference.popitem()
self.other = {key: value}
# A (key, value) pair in the mapping
key, value = self.reference.popitem()
self.inmapping = {key: value}
self.reference[key] = value
def test_read(self):
# Test for read only operations on mapping
p = self._empty_mapping()
p1 = dict(p) # workaround for singleton objects
d = self._full_mapping(self.reference)
if d is p:
p = p1
# Indexing
for key, value in self.reference.items():
self.assertEqual(d[key], value)
knownkey = list(self.other.keys())[0]
self.assertRaises(KeyError, lambda: d[knownkey])
# len
self.assertEqual(len(p), 0)
self.assertEqual(len(d), len(self.reference))
# __contains__
for k in self.reference:
self.assertIn(k, d)
for k in self.other:
self.assertNotIn(k, d)
# cmp
self.assertTrue(
p == p
) # NB: don't use assertEqual, that doesn't actually use ==
self.assertTrue(d == d)
self.assertTrue(p != d)
self.assertTrue(d != p)
# bool
if p:
self.fail("Empty mapping must compare to False")
if not d:
self.fail("Full mapping must compare to True")
# keys(), items(), iterkeys() ...
def check_iterandlist(iter, lst, ref):
self.assertTrue(hasattr(iter, "__next__"))
self.assertTrue(hasattr(iter, "__iter__"))
x = list(iter)
self.assertTrue(set(x) == set(lst) == set(ref))
check_iterandlist(iter(d.keys()), list(d.keys()), self.reference.keys())
check_iterandlist(iter(d), list(d.keys()), self.reference.keys())
check_iterandlist(iter(d.values()), list(d.values()), self.reference.values())
check_iterandlist(iter(d.items()), list(d.items()), self.reference.items())
# get
key, value = next(iter(d.items()))
knownkey, knownvalue = next(iter(self.other.items()))
self.assertEqual(d.get(key, knownvalue), value)
self.assertEqual(d.get(knownkey, knownvalue), knownvalue)
self.assertNotIn(knownkey, d)
def test_write(self):
# Test for write operations on mapping
p = self._empty_mapping()
# Indexing
for key, value in self.reference.items():
p[key] = value
self.assertEqual(p[key], value)
for key in self.reference.keys():
del p[key]
self.assertRaises(KeyError, lambda: p[key])
p = self._empty_mapping()
# update
p.update(self.reference)
self.assertEqual(dict(p), self.reference)
items = list(p.items())
p = self._empty_mapping()
p.update(items)
self.assertEqual(dict(p), self.reference)
d = self._full_mapping(self.reference)
# setdefault
key, value = next(iter(d.items()))
knownkey, knownvalue = next(iter(self.other.items()))
self.assertEqual(d.setdefault(key, knownvalue), value)
self.assertEqual(d[key], value)
self.assertEqual(d.setdefault(knownkey, knownvalue), knownvalue)
self.assertEqual(d[knownkey], knownvalue)
# pop
self.assertEqual(d.pop(knownkey), knownvalue)
self.assertNotIn(knownkey, d)
self.assertRaises(KeyError, d.pop, knownkey)
default = 909
d[knownkey] = knownvalue
self.assertEqual(d.pop(knownkey, default), knownvalue)
self.assertNotIn(knownkey, d)
self.assertEqual(d.pop(knownkey, default), default)
# popitem
key, value = d.popitem()
self.assertNotIn(key, d)
self.assertEqual(value, self.reference[key])
p = self._empty_mapping()
self.assertRaises(KeyError, p.popitem)
def test_constructor(self):
self.assertEqual(self._empty_mapping(), self._empty_mapping())
def test_bool(self):
self.assertTrue(not self._empty_mapping())
self.assertTrue(self.reference)
self.assertTrue(bool(self._empty_mapping()) is False)
self.assertTrue(bool(self.reference) is True)
def test_keys(self):
d = self._empty_mapping()
self.assertEqual(list(d.keys()), [])
d = self.reference
self.assertIn(list(self.inmapping.keys())[0], d.keys())
self.assertNotIn(list(self.other.keys())[0], d.keys())
self.assertRaises(TypeError, d.keys, None)
def test_values(self):
d = self._empty_mapping()
self.assertEqual(list(d.values()), [])
self.assertRaises(TypeError, d.values, None)
def test_items(self):
d = self._empty_mapping()
self.assertEqual(list(d.items()), [])
self.assertRaises(TypeError, d.items, None)
def test_len(self):
d = self._empty_mapping()
self.assertEqual(len(d), 0)
def test_getitem(self):
d = self.reference
self.assertEqual(
d[list(self.inmapping.keys())[0]], list(self.inmapping.values())[0]
)
self.assertRaises(TypeError, d.__getitem__)
def test_update(self):
# mapping argument
d = self._empty_mapping()
d.update(self.other)
self.assertEqual(list(d.items()), list(self.other.items()))
# No argument
d = self._empty_mapping()
d.update()
self.assertEqual(d, self._empty_mapping())
# item sequence
d = self._empty_mapping()
d.update(self.other.items())
self.assertEqual(list(d.items()), list(self.other.items()))
# Iterator
d = self._empty_mapping()
d.update(self.other.items())
self.assertEqual(list(d.items()), list(self.other.items()))
# FIXME: Doesn't work with UserDict
# self.assertRaises((TypeError, AttributeError), d.update, None)
self.assertRaises((TypeError, AttributeError), d.update, 42)
outerself = self
class SimpleUserDict:
def __init__(self):
self.d = outerself.reference
def keys(self):
return self.d.keys()
def __getitem__(self, i):
return self.d[i]
d.clear()
d.update(SimpleUserDict())
i1 = sorted((id(k), v) for k, v in d.items())
i2 = sorted((id(k), v) for k, v in self.reference.items())
self.assertEqual(i1, i2)
class Exc(Exception):
pass
d = self._empty_mapping()
class FailingUserDict:
def keys(self):
raise Exc
self.assertRaises(Exc, d.update, FailingUserDict())
d.clear()
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self):
self.i = 1
def __iter__(self):
return self
def __next__(self):
if self.i:
self.i = 0
return "a"
raise Exc
return BogonIter()
def __getitem__(self, key):
return key
self.assertRaises(Exc, d.update, FailingUserDict())
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self):
self.i = ord("a")
def __iter__(self):
return self
def __next__(self):
if self.i <= ord("z"):
rtn = chr(self.i)
self.i += 1
return rtn
raise StopIteration
return BogonIter()
def __getitem__(self, key):
raise Exc
self.assertRaises(Exc, d.update, FailingUserDict())
d = self._empty_mapping()
class badseq:
def __iter__(self):
return self
def __next__(self):
raise Exc()
self.assertRaises(Exc, d.update, badseq())
self.assertRaises(ValueError, d.update, [(1, 2, 3)])
# no test_fromkeys or test_copy as both os.environ and selves don't support it
def test_get(self):
d = self._empty_mapping()
self.assertTrue(d.get(list(self.other.keys())[0]) is None)
self.assertEqual(d.get(list(self.other.keys())[0], 3), 3)
d = self.reference
self.assertTrue(d.get(list(self.other.keys())[0]) is None)
self.assertEqual(d.get(list(self.other.keys())[0], 3), 3)
self.assertEqual(
d.get(list(self.inmapping.keys())[0]), list(self.inmapping.values())[0]
)
self.assertEqual(
d.get(list(self.inmapping.keys())[0], 3), list(self.inmapping.values())[0]
)
self.assertRaises(TypeError, d.get)
self.assertRaises(TypeError, d.get, None, None, None)
def test_setdefault(self):
d = self._empty_mapping()
self.assertRaises(TypeError, d.setdefault)
def test_popitem(self):
d = self._empty_mapping()
self.assertRaises(KeyError, d.popitem)
self.assertRaises(TypeError, d.popitem, 42)
def test_pop(self):
d = self._empty_mapping()
k, v = list(self.inmapping.items())[0]
d[k] = v
self.assertRaises(KeyError, d.pop, list(self.other.keys())[0])
self.assertEqual(d.pop(k), v)
self.assertEqual(len(d), 0)
self.assertRaises(KeyError, d.pop, k)
if __name__ == "__main__":
run_tests()
|
def C():
return torch.randn(1)
# These tests are ported from cpython/Lib/test/test_weakref.py,
# but adapted to use tensor rather than object
class WeakTest(TestCase):
COUNT = 10
def test_make_weak_keyed_dict_from_dict(self):
o = torch.randn(2)
dict = WeakIdKeyDictionary({o: 364})
self.assertEqual(dict[o], 364)
def test_make_weak_keyed_dict_from_weak_keyed_dict(self):
o = torch.randn(3)
dict = WeakIdKeyDictionary({o: 364})
dict2 = WeakIdKeyDictionary(dict)
self.assertEqual(dict[o], 364)
def check_popitem(self, klass, key1, value1, key2, value2):
weakdict = klass()
weakdict[key1] = value1
weakdict[key2] = value2
self.assertEqual(len(weakdict), 2)
k, v = weakdict.popitem()
self.assertEqual(len(weakdict), 1)
if k is key1:
self.assertIs(v, value1)
else:
self.assertIs(v, value2)
k, v = weakdict.popitem()
self.assertEqual(len(weakdict), 0)
if k is key1:
self.assertIs(v, value1)
else:
self.assertIs(v, value2)
def test_weak_keyed_dict_popitem(self):
self.check_popitem(WeakIdKeyDictionary, C(), "value 1", C(), "value 2")
def check_setdefault(self, klass, key, value1, value2):
self.assertIsNot(
value1,
value2,
"invalid test -- value parameters must be distinct objects",
)
weakdict = klass()
o = weakdict.setdefault(key, value1)
self.assertIs(o, value1)
self.assertIn(key, weakdict)
self.assertIs(weakdict.get(key), value1)
self.assertIs(weakdict[key], value1)
o = weakdict.setdefault(key, value2)
self.assertIs(o, value1)
self.assertIn(key, weakdict)
self.assertIs(weakdict.get(key), value1)
self.assertIs(weakdict[key], value1)
def test_weak_keyed_dict_setdefault(self):
self.check_setdefault(WeakIdKeyDictionary, C(), "value 1", "value 2")
def check_update(self, klass, dict):
#
# This exercises d.update(), len(d), d.keys(), k in d,
# d.get(), d[].
#
weakdict = klass()
weakdict.update(dict)
self.assertEqual(len(weakdict), len(dict))
for k in weakdict.keys():
self.assertIn(k, dict, "mysterious new key appeared in weak dict")
v = dict.get(k)
self.assertIs(v, weakdict[k])
self.assertIs(v, weakdict.get(k))
for k in dict.keys():
self.assertIn(k, weakdict, "original key disappeared in weak dict")
v = dict[k]
self.assertIs(v, weakdict[k])
self.assertIs(v, weakdict.get(k))
def test_weak_keyed_dict_update(self):
self.check_update(WeakIdKeyDictionary, {C(): 1, C(): 2, C(): 3})
def test_weak_keyed_delitem(self):
d = WeakIdKeyDictionary()
o1 = torch.randn(1)
o2 = torch.randn(2)
d[o1] = "something"
d[o2] = "something"
self.assertEqual(len(d), 2)
del d[o1]
self.assertEqual(len(d), 1)
self.assertEqual(list(d.keys()), [o2])
def test_weak_keyed_union_operators(self):
try:
{} | {}
except TypeError:
self.skipTest("dict union not supported in this Python")
o1 = C()
o2 = C()
o3 = C()
wkd1 = WeakIdKeyDictionary({o1: 1, o2: 2})
wkd2 = WeakIdKeyDictionary({o3: 3, o1: 4})
wkd3 = wkd1.copy()
d1 = {o2: "5", o3: "6"}
pairs = [(o2, 7), (o3, 8)]
tmp1 = wkd1 | wkd2 # Between two WeakKeyDictionaries
self.assertEqual(dict(tmp1), dict(wkd1) | dict(wkd2))
self.assertIs(type(tmp1), WeakIdKeyDictionary)
wkd1 |= wkd2
self.assertEqual(wkd1, tmp1)
tmp2 = wkd2 | d1 # Between WeakKeyDictionary and mapping
self.assertEqual(dict(tmp2), dict(wkd2) | d1)
self.assertIs(type(tmp2), WeakIdKeyDictionary)
wkd2 |= d1
self.assertEqual(wkd2, tmp2)
tmp3 = wkd3.copy() # Between WeakKeyDictionary and iterable key, value
tmp3 |= pairs
self.assertEqual(dict(tmp3), dict(wkd3) | dict(pairs))
self.assertIs(type(tmp3), WeakIdKeyDictionary)
tmp4 = d1 | wkd3 # Testing .__ror__
self.assertEqual(dict(tmp4), d1 | dict(wkd3))
self.assertIs(type(tmp4), WeakIdKeyDictionary)
del o1
self.assertNotIn(4, tmp1.values())
self.assertNotIn(4, tmp2.values())
self.assertNotIn(1, tmp3.values())
self.assertNotIn(1, tmp4.values())
def test_weak_keyed_bad_delitem(self):
d = WeakIdKeyDictionary()
o = torch.randn(1)
# An attempt to delete an object that isn't there should raise
# KeyError. It didn't before 2.3.
self.assertRaises(KeyError, d.__delitem__, o)
self.assertRaises(KeyError, d.__getitem__, o)
# If a key isn't of a weakly referencable type, __getitem__ and
# __setitem__ raise TypeError. __delitem__ should too.
self.assertRaises(TypeError, d.__delitem__, 13)
self.assertRaises(TypeError, d.__getitem__, 13)
self.assertRaises(TypeError, d.__setitem__, 13, 13)
def test_make_weak_keyed_dict_repr(self):
dict = WeakIdKeyDictionary()
self.assertRegex(repr(dict), "<WeakIdKeyDictionary at 0x.*>")
def check_threaded_weak_dict_copy(self, type_, deepcopy):
# `deepcopy` should be either True or False.
exc = []
# Cannot give these slots as weakrefs weren't supported
# on these objects until later versions of Python
class DummyKey: # noqa: B903
def __init__(self, ctr):
self.ctr = ctr
class DummyValue: # noqa: B903
def __init__(self, ctr):
self.ctr = ctr
def dict_copy(d, exc):
try:
if deepcopy is True:
_ = copy.deepcopy(d)
else:
_ = d.copy()
except Exception as ex:
exc.append(ex)
def pop_and_collect(lst):
gc_ctr = 0
while lst:
i = random.randint(0, len(lst) - 1)
gc_ctr += 1
lst.pop(i)
if gc_ctr % 10000 == 0:
gc.collect() # just in case
d = type_()
keys = []
values = []
# Initialize d with many entries
for i in range(70000):
k, v = DummyKey(i), DummyValue(i)
keys.append(k)
values.append(v)
d[k] = v
del k
del v
t_copy = threading.Thread(target=dict_copy, args=(d, exc))
t_collect = threading.Thread(target=pop_and_collect, args=(keys,))
t_copy.start()
t_collect.start()
t_copy.join()
t_collect.join()
# Test exceptions
if exc:
raise exc[0]
def test_threaded_weak_key_dict_copy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(WeakIdKeyDictionary, False)
def test_threaded_weak_key_dict_deepcopy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(WeakIdKeyDictionary, True)
# Adapted from cpython/Lib/test/mapping_tests.py
class WeakKeyDictionaryTestCase(TestCase):
__ref = {torch.randn(1): 1, torch.randn(2): 2, torch.randn(3): 3}
type2test = WeakIdKeyDictionary
def _reference(self):
return self.__ref.copy()
def _empty_mapping(self):
"""Return an empty mapping object"""
return self.type2test()
def _full_mapping(self, data):
"""Return a mapping object with the value contained in data
dictionary"""
x = self._empty_mapping()
for key, value in data.items():
x[key] = value
return x
def __init__(self, *args, **kw):
unittest.TestCase.__init__(self, *args, **kw)
self.reference = self._reference().copy()
# A (key, value) pair not in the mapping
key, value = self.reference.popitem()
self.other = {key: value}
# A (key, value) pair in the mapping
key, value = self.reference.popitem()
self.inmapping = {key: value}
self.reference[key] = value
def test_read(self):
# Test for read only operations on mapping
p = self._empty_mapping()
p1 = dict(p) # workaround for singleton objects
d = self._full_mapping(self.reference)
if d is p:
p = p1
# Indexing
for key, value in self.reference.items():
self.assertEqual(d[key], value)
knownkey = next(iter(self.other.keys()))
self.assertRaises(KeyError, lambda: d[knownkey])
# len
self.assertEqual(len(p), 0)
self.assertEqual(len(d), len(self.reference))
# __contains__
for k in self.reference:
self.assertIn(k, d)
for k in self.other:
self.assertNotIn(k, d)
# cmp
self.assertTrue(
p == p
) # NB: don't use assertEqual, that doesn't actually use ==
self.assertTrue(d == d)
self.assertTrue(p != d)
self.assertTrue(d != p)
# bool
if p:
self.fail("Empty mapping must compare to False")
if not d:
self.fail("Full mapping must compare to True")
# keys(), items(), iterkeys() ...
def check_iterandlist(iter, lst, ref):
self.assertTrue(hasattr(iter, "__next__"))
self.assertTrue(hasattr(iter, "__iter__"))
x = list(iter)
self.assertTrue(set(x) == set(lst) == set(ref))
check_iterandlist(iter(d.keys()), list(d.keys()), self.reference.keys())
check_iterandlist(iter(d), list(d.keys()), self.reference.keys())
check_iterandlist(iter(d.values()), list(d.values()), self.reference.values())
check_iterandlist(iter(d.items()), list(d.items()), self.reference.items())
# get
key, value = next(iter(d.items()))
knownkey, knownvalue = next(iter(self.other.items()))
self.assertEqual(d.get(key, knownvalue), value)
self.assertEqual(d.get(knownkey, knownvalue), knownvalue)
self.assertNotIn(knownkey, d)
def test_write(self):
# Test for write operations on mapping
p = self._empty_mapping()
# Indexing
for key, value in self.reference.items():
p[key] = value
self.assertEqual(p[key], value)
for key in self.reference.keys():
del p[key]
self.assertRaises(KeyError, lambda: p[key])
p = self._empty_mapping()
# update
p.update(self.reference)
self.assertEqual(dict(p), self.reference)
items = list(p.items())
p = self._empty_mapping()
p.update(items)
self.assertEqual(dict(p), self.reference)
d = self._full_mapping(self.reference)
# setdefault
key, value = next(iter(d.items()))
knownkey, knownvalue = next(iter(self.other.items()))
self.assertEqual(d.setdefault(key, knownvalue), value)
self.assertEqual(d[key], value)
self.assertEqual(d.setdefault(knownkey, knownvalue), knownvalue)
self.assertEqual(d[knownkey], knownvalue)
# pop
self.assertEqual(d.pop(knownkey), knownvalue)
self.assertNotIn(knownkey, d)
self.assertRaises(KeyError, d.pop, knownkey)
default = 909
d[knownkey] = knownvalue
self.assertEqual(d.pop(knownkey, default), knownvalue)
self.assertNotIn(knownkey, d)
self.assertEqual(d.pop(knownkey, default), default)
# popitem
key, value = d.popitem()
self.assertNotIn(key, d)
self.assertEqual(value, self.reference[key])
p = self._empty_mapping()
self.assertRaises(KeyError, p.popitem)
def test_constructor(self):
self.assertEqual(self._empty_mapping(), self._empty_mapping())
def test_bool(self):
self.assertTrue(not self._empty_mapping())
self.assertTrue(self.reference)
self.assertTrue(bool(self._empty_mapping()) is False)
self.assertTrue(bool(self.reference) is True)
def test_keys(self):
d = self._empty_mapping()
self.assertEqual(list(d.keys()), [])
d = self.reference
self.assertIn(next(iter(self.inmapping.keys())), d.keys())
self.assertNotIn(next(iter(self.other.keys())), d.keys())
self.assertRaises(TypeError, d.keys, None)
def test_values(self):
d = self._empty_mapping()
self.assertEqual(list(d.values()), [])
self.assertRaises(TypeError, d.values, None)
def test_items(self):
d = self._empty_mapping()
self.assertEqual(list(d.items()), [])
self.assertRaises(TypeError, d.items, None)
def test_len(self):
d = self._empty_mapping()
self.assertEqual(len(d), 0)
def test_getitem(self):
d = self.reference
self.assertEqual(
d[next(iter(self.inmapping.keys()))], next(iter(self.inmapping.values()))
)
self.assertRaises(TypeError, d.__getitem__)
def test_update(self):
# mapping argument
d = self._empty_mapping()
d.update(self.other)
self.assertEqual(list(d.items()), list(self.other.items()))
# No argument
d = self._empty_mapping()
d.update()
self.assertEqual(d, self._empty_mapping())
# item sequence
d = self._empty_mapping()
d.update(self.other.items())
self.assertEqual(list(d.items()), list(self.other.items()))
# Iterator
d = self._empty_mapping()
d.update(self.other.items())
self.assertEqual(list(d.items()), list(self.other.items()))
# FIXME: Doesn't work with UserDict
# self.assertRaises((TypeError, AttributeError), d.update, None)
self.assertRaises((TypeError, AttributeError), d.update, 42)
outerself = self
class SimpleUserDict:
def __init__(self) -> None:
self.d = outerself.reference
def keys(self):
return self.d.keys()
def __getitem__(self, i):
return self.d[i]
d.clear()
d.update(SimpleUserDict())
i1 = sorted((id(k), v) for k, v in d.items())
i2 = sorted((id(k), v) for k, v in self.reference.items())
self.assertEqual(i1, i2)
class Exc(Exception):
pass
d = self._empty_mapping()
class FailingUserDict:
def keys(self):
raise Exc
self.assertRaises(Exc, d.update, FailingUserDict())
d.clear()
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self) -> None:
self.i = 1
def __iter__(self):
return self
def __next__(self):
if self.i:
self.i = 0
return "a"
raise Exc
return BogonIter()
def __getitem__(self, key):
return key
self.assertRaises(Exc, d.update, FailingUserDict())
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self) -> None:
self.i = ord("a")
def __iter__(self):
return self
def __next__(self):
if self.i <= ord("z"):
rtn = chr(self.i)
self.i += 1
return rtn
raise StopIteration
return BogonIter()
def __getitem__(self, key):
raise Exc
self.assertRaises(Exc, d.update, FailingUserDict())
d = self._empty_mapping()
class badseq:
def __iter__(self):
return self
def __next__(self):
raise Exc
self.assertRaises(Exc, d.update, badseq())
self.assertRaises(ValueError, d.update, [(1, 2, 3)])
# no test_fromkeys or test_copy as both os.environ and selves don't support it
def test_get(self):
d = self._empty_mapping()
self.assertTrue(d.get(next(iter(self.other.keys()))) is None)
self.assertEqual(d.get(next(iter(self.other.keys())), 3), 3)
d = self.reference
self.assertTrue(d.get(next(iter(self.other.keys()))) is None)
self.assertEqual(d.get(next(iter(self.other.keys())), 3), 3)
self.assertEqual(
d.get(next(iter(self.inmapping.keys()))),
next(iter(self.inmapping.values())),
)
self.assertEqual(
d.get(next(iter(self.inmapping.keys())), 3),
next(iter(self.inmapping.values())),
)
self.assertRaises(TypeError, d.get)
self.assertRaises(TypeError, d.get, None, None, None)
def test_setdefault(self):
d = self._empty_mapping()
self.assertRaises(TypeError, d.setdefault)
def test_popitem(self):
d = self._empty_mapping()
self.assertRaises(KeyError, d.popitem)
self.assertRaises(TypeError, d.popitem, 42)
def test_pop(self):
d = self._empty_mapping()
k, v = next(iter(self.inmapping.items()))
d[k] = v
self.assertRaises(KeyError, d.pop, next(iter(self.other.keys())))
self.assertEqual(d.pop(k), v)
self.assertEqual(len(d), 0)
self.assertRaises(KeyError, d.pop, k)
# Adapted from cpython/Lib/test/mapping_tests.py
class WeakKeyDictionaryScriptObjectTestCase(TestCase):
def _reference(self):
self.__ref = {
torch.classes._TorchScriptTesting._Foo(1, 2): 1,
torch.classes._TorchScriptTesting._Foo(2, 3): 2,
torch.classes._TorchScriptTesting._Foo(3, 4): 3,
}
return self.__ref.copy()
def _empty_mapping(self):
"""Return an empty mapping object"""
return WeakIdKeyDictionary(ref_type=_WeakHashRef)
def _full_mapping(self, data):
"""Return a mapping object with the value contained in data
dictionary"""
x = self._empty_mapping()
for key, value in data.items():
x[key] = value
return x
def setUp(self):
if IS_MACOS:
raise unittest.SkipTest("non-portable load_library call used in test")
def __init__(self, *args, **kw):
unittest.TestCase.__init__(self, *args, **kw)
if IS_SANDCASTLE or IS_FBCODE:
torch.ops.load_library(
"//caffe2/test/cpp/jit:test_custom_class_registrations"
)
elif IS_MACOS:
# don't load the library, just skip the tests in setUp
return
else:
lib_file_path = find_library_location("libtorchbind_test.so")
if IS_WINDOWS:
lib_file_path = find_library_location("torchbind_test.dll")
torch.ops.load_library(str(lib_file_path))
self.reference = self._reference().copy()
# A (key, value) pair not in the mapping
key, value = self.reference.popitem()
self.other = {key: value}
# A (key, value) pair in the mapping
key, value = self.reference.popitem()
self.inmapping = {key: value}
self.reference[key] = value
def test_read(self):
# Test for read only operations on mapping
p = self._empty_mapping()
p1 = dict(p) # workaround for singleton objects
d = self._full_mapping(self.reference)
if d is p:
p = p1
# Indexing
for key, value in self.reference.items():
self.assertEqual(d[key], value)
knownkey = next(iter(self.other.keys()))
self.assertRaises(KeyError, lambda: d[knownkey])
# len
self.assertEqual(len(p), 0)
self.assertEqual(len(d), len(self.reference))
# __contains__
for k in self.reference:
self.assertIn(k, d)
for k in self.other:
self.assertNotIn(k, d)
# cmp
self.assertTrue(
p == p
) # NB: don't use assertEqual, that doesn't actually use ==
self.assertTrue(d == d)
self.assertTrue(p != d)
self.assertTrue(d != p)
# bool
if p:
self.fail("Empty mapping must compare to False")
if not d:
self.fail("Full mapping must compare to True")
# keys(), items(), iterkeys() ...
def check_iterandlist(iter, lst, ref):
self.assertTrue(hasattr(iter, "__next__"))
self.assertTrue(hasattr(iter, "__iter__"))
x = list(iter)
self.assertTrue(set(x) == set(lst) == set(ref))
check_iterandlist(iter(d.keys()), list(d.keys()), self.reference.keys())
check_iterandlist(iter(d), list(d.keys()), self.reference.keys())
check_iterandlist(iter(d.values()), list(d.values()), self.reference.values())
check_iterandlist(iter(d.items()), list(d.items()), self.reference.items())
# get
key, value = next(iter(d.items()))
knownkey, knownvalue = next(iter(self.other.items()))
self.assertEqual(d.get(key, knownvalue), value)
self.assertEqual(d.get(knownkey, knownvalue), knownvalue)
self.assertNotIn(knownkey, d)
def test_write(self):
# Test for write operations on mapping
p = self._empty_mapping()
# Indexing
for key, value in self.reference.items():
p[key] = value
self.assertEqual(p[key], value)
for key in self.reference.keys():
del p[key]
self.assertRaises(KeyError, lambda: p[key])
p = self._empty_mapping()
# update
p.update(self.reference)
self.assertEqual(dict(p), self.reference)
items = list(p.items())
p = self._empty_mapping()
p.update(items)
self.assertEqual(dict(p), self.reference)
d = self._full_mapping(self.reference)
# setdefault
key, value = next(iter(d.items()))
knownkey, knownvalue = next(iter(self.other.items()))
self.assertEqual(d.setdefault(key, knownvalue), value)
self.assertEqual(d[key], value)
self.assertEqual(d.setdefault(knownkey, knownvalue), knownvalue)
self.assertEqual(d[knownkey], knownvalue)
# pop
self.assertEqual(d.pop(knownkey), knownvalue)
self.assertNotIn(knownkey, d)
self.assertRaises(KeyError, d.pop, knownkey)
default = 909
d[knownkey] = knownvalue
self.assertEqual(d.pop(knownkey, default), knownvalue)
self.assertNotIn(knownkey, d)
self.assertEqual(d.pop(knownkey, default), default)
# popitem
key, value = d.popitem()
self.assertNotIn(key, d)
self.assertEqual(value, self.reference[key])
p = self._empty_mapping()
self.assertRaises(KeyError, p.popitem)
def test_constructor(self):
self.assertEqual(self._empty_mapping(), self._empty_mapping())
def test_bool(self):
self.assertTrue(not self._empty_mapping())
self.assertTrue(self.reference)
self.assertTrue(bool(self._empty_mapping()) is False)
self.assertTrue(bool(self.reference) is True)
def test_keys(self):
d = self._empty_mapping()
self.assertEqual(list(d.keys()), [])
d = self.reference
self.assertIn(next(iter(self.inmapping.keys())), d.keys())
self.assertNotIn(next(iter(self.other.keys())), d.keys())
self.assertRaises(TypeError, d.keys, None)
def test_values(self):
d = self._empty_mapping()
self.assertEqual(list(d.values()), [])
self.assertRaises(TypeError, d.values, None)
def test_items(self):
d = self._empty_mapping()
self.assertEqual(list(d.items()), [])
self.assertRaises(TypeError, d.items, None)
def test_len(self):
d = self._empty_mapping()
self.assertEqual(len(d), 0)
def test_getitem(self):
d = self.reference
self.assertEqual(
d[next(iter(self.inmapping.keys()))], next(iter(self.inmapping.values()))
)
self.assertRaises(TypeError, d.__getitem__)
def test_update(self):
# mapping argument
d = self._empty_mapping()
d.update(self.other)
self.assertEqual(list(d.items()), list(self.other.items()))
# No argument
d = self._empty_mapping()
d.update()
self.assertEqual(d, self._empty_mapping())
# item sequence
d = self._empty_mapping()
d.update(self.other.items())
self.assertEqual(list(d.items()), list(self.other.items()))
# Iterator
d = self._empty_mapping()
d.update(self.other.items())
self.assertEqual(list(d.items()), list(self.other.items()))
# FIXME: Doesn't work with UserDict
# self.assertRaises((TypeError, AttributeError), d.update, None)
self.assertRaises((TypeError, AttributeError), d.update, 42)
outerself = self
class SimpleUserDict:
def __init__(self) -> None:
self.d = outerself.reference
def keys(self):
return self.d.keys()
def __getitem__(self, i):
return self.d[i]
d.clear()
d.update(SimpleUserDict())
i1 = sorted((id(k), v) for k, v in d.items())
i2 = sorted((id(k), v) for k, v in self.reference.items())
self.assertEqual(i1, i2)
class Exc(Exception):
pass
d = self._empty_mapping()
class FailingUserDict:
def keys(self):
raise Exc
self.assertRaises(Exc, d.update, FailingUserDict())
d.clear()
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self) -> None:
self.i = 1
def __iter__(self):
return self
def __next__(self):
if self.i:
self.i = 0
return "a"
raise Exc
return BogonIter()
def __getitem__(self, key):
return key
self.assertRaises(Exc, d.update, FailingUserDict())
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self) -> None:
self.i = ord("a")
def __iter__(self):
return self
def __next__(self):
if self.i <= ord("z"):
rtn = chr(self.i)
self.i += 1
return rtn
raise StopIteration
return BogonIter()
def __getitem__(self, key):
raise Exc
self.assertRaises(Exc, d.update, FailingUserDict())
d = self._empty_mapping()
class badseq:
def __iter__(self):
return self
def __next__(self):
raise Exc
self.assertRaises(Exc, d.update, badseq())
self.assertRaises(ValueError, d.update, [(1, 2, 3)])
# no test_fromkeys or test_copy as both os.environ and selves don't support it
def test_get(self):
d = self._empty_mapping()
self.assertTrue(d.get(next(iter(self.other.keys()))) is None)
self.assertEqual(d.get(next(iter(self.other.keys())), 3), 3)
d = self.reference
self.assertTrue(d.get(next(iter(self.other.keys()))) is None)
self.assertEqual(d.get(next(iter(self.other.keys())), 3), 3)
self.assertEqual(
d.get(next(iter(self.inmapping.keys()))),
next(iter(self.inmapping.values())),
)
self.assertEqual(
d.get(next(iter(self.inmapping.keys())), 3),
next(iter(self.inmapping.values())),
)
self.assertRaises(TypeError, d.get)
self.assertRaises(TypeError, d.get, None, None, None)
def test_setdefault(self):
d = self._empty_mapping()
self.assertRaises(TypeError, d.setdefault)
def test_popitem(self):
d = self._empty_mapping()
self.assertRaises(KeyError, d.popitem)
self.assertRaises(TypeError, d.popitem, 42)
def test_pop(self):
d = self._empty_mapping()
k, v = next(iter(self.inmapping.items()))
d[k] = v
self.assertRaises(KeyError, d.pop, next(iter(self.other.keys())))
self.assertEqual(d.pop(k), v)
self.assertEqual(len(d), 0)
self.assertRaises(KeyError, d.pop, k)
if __name__ == "__main__":
run_tests()
|
import copy
import gc
import random
import threading
import unittest
import torch
from torch.testing._internal.common_utils import run_tests, TestCase
from torch.utils.weak import WeakIdKeyDictionary
|
import copy
import gc
import random
import threading
import unittest
import torch
from torch.testing._internal.common_utils import (
find_library_location,
IS_FBCODE,
IS_MACOS,
IS_SANDCASTLE,
IS_WINDOWS,
run_tests,
TestCase,
)
from torch.utils.weak import _WeakHashRef, WeakIdKeyDictionary
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_view_ops.py
|
test_resize_overflow
|
def test_resize_overflow(self, device):
x = torch.empty((), dtype=torch.float64)
with self.assertRaisesRegex(RuntimeError, 'Storage size calculation overflowed'):
x.resize_([2, 4, 2**29, 2**29])
with self.assertRaisesRegex(RuntimeError, 'overflow'):
x.resize_([8, 8, 2**29, 2**29])
|
def test_resize_overflow(self, device):
x = torch.empty((), dtype=torch.float64)
with self.assertRaisesRegex(
RuntimeError, "Storage size calculation overflowed"
):
x.resize_([2, 4, 2**29, 2**29])
with self.assertRaisesRegex(RuntimeError, "overflow"):
x.resize_([8, 8, 2**29, 2**29])
with self.assertRaisesRegex(RuntimeError, "Stride calculation overflowed"):
x.resize_([0, 4, 2305843009213693952])
|
import torch
import numpy as np
import unittest
from itertools import product, permutations, combinations
from functools import partial
import random
from torch.testing import make_tensor
from torch.testing._internal.common_utils import (
IS_FBCODE, TestCase, run_tests, suppress_warnings, gradcheck, gradgradcheck,
numpy_to_torch_dtype_dict, skipIfTorchDynamo
)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, onlyCPU, dtypes, onlyNativeDeviceTypes, skipMeta)
from torch.testing._internal.common_dtype import (
all_types_and_complex_and, complex_types, all_types_and, floating_and_complex_types_and,
)
class TestOldViewOps(TestCase):
|
import random
import unittest
from functools import partial
from itertools import combinations, permutations, product
import numpy as np
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_device_type import (
dtypes,
instantiate_device_type_tests,
onlyCPU,
onlyNativeDeviceTypes,
skipLazy,
skipMeta,
skipXLA,
)
from torch.testing._internal.common_dtype import (
all_types_and,
all_types_and_complex_and,
complex_types,
floating_and_complex_types_and,
)
from torch.testing._internal.common_utils import (
gradcheck,
gradgradcheck,
IS_FBCODE,
numpy_to_torch_dtype_dict,
run_tests,
skipIfTorchDynamo,
suppress_warnings,
TestCase,
)
class TestOldViewOps(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_weak.py
|
test_read
|
def test_read(self):
# Test for read only operations on mapping
p = self._empty_mapping()
p1 = dict(p) # workaround for singleton objects
d = self._full_mapping(self.reference)
if d is p:
p = p1
# Indexing
for key, value in self.reference.items():
self.assertEqual(d[key], value)
knownkey = list(self.other.keys())[0]
self.assertRaises(KeyError, lambda: d[knownkey])
# len
self.assertEqual(len(p), 0)
self.assertEqual(len(d), len(self.reference))
# __contains__
for k in self.reference:
self.assertIn(k, d)
for k in self.other:
self.assertNotIn(k, d)
# cmp
self.assertTrue(
p == p
) # NB: don't use assertEqual, that doesn't actually use ==
self.assertTrue(d == d)
self.assertTrue(p != d)
self.assertTrue(d != p)
# bool
if p:
self.fail("Empty mapping must compare to False")
if not d:
self.fail("Full mapping must compare to True")
# keys(), items(), iterkeys() ...
def check_iterandlist(iter, lst, ref):
self.assertTrue(hasattr(iter, "__next__"))
self.assertTrue(hasattr(iter, "__iter__"))
x = list(iter)
self.assertTrue(set(x) == set(lst) == set(ref))
check_iterandlist(iter(d.keys()), list(d.keys()), self.reference.keys())
check_iterandlist(iter(d), list(d.keys()), self.reference.keys())
check_iterandlist(iter(d.values()), list(d.values()), self.reference.values())
check_iterandlist(iter(d.items()), list(d.items()), self.reference.items())
# get
key, value = next(iter(d.items()))
knownkey, knownvalue = next(iter(self.other.items()))
self.assertEqual(d.get(key, knownvalue), value)
self.assertEqual(d.get(knownkey, knownvalue), knownvalue)
self.assertNotIn(knownkey, d)
|
def test_read(self):
# Test for read only operations on mapping
p = self._empty_mapping()
p1 = dict(p) # workaround for singleton objects
d = self._full_mapping(self.reference)
if d is p:
p = p1
# Indexing
for key, value in self.reference.items():
self.assertEqual(d[key], value)
knownkey = next(iter(self.other.keys()))
self.assertRaises(KeyError, lambda: d[knownkey])
# len
self.assertEqual(len(p), 0)
self.assertEqual(len(d), len(self.reference))
# __contains__
for k in self.reference:
self.assertIn(k, d)
for k in self.other:
self.assertNotIn(k, d)
# cmp
self.assertTrue(
p == p
) # NB: don't use assertEqual, that doesn't actually use ==
self.assertTrue(d == d)
self.assertTrue(p != d)
self.assertTrue(d != p)
# bool
if p:
self.fail("Empty mapping must compare to False")
if not d:
self.fail("Full mapping must compare to True")
# keys(), items(), iterkeys() ...
def check_iterandlist(iter, lst, ref):
self.assertTrue(hasattr(iter, "__next__"))
self.assertTrue(hasattr(iter, "__iter__"))
x = list(iter)
self.assertTrue(set(x) == set(lst) == set(ref))
check_iterandlist(iter(d.keys()), list(d.keys()), self.reference.keys())
check_iterandlist(iter(d), list(d.keys()), self.reference.keys())
check_iterandlist(iter(d.values()), list(d.values()), self.reference.values())
check_iterandlist(iter(d.items()), list(d.items()), self.reference.items())
# get
key, value = next(iter(d.items()))
knownkey, knownvalue = next(iter(self.other.items()))
self.assertEqual(d.get(key, knownvalue), value)
self.assertEqual(d.get(knownkey, knownvalue), knownvalue)
self.assertNotIn(knownkey, d)
|
import copy
import gc
import random
import threading
import unittest
import torch
from torch.testing._internal.common_utils import run_tests, TestCase
from torch.utils.weak import WeakIdKeyDictionary
class WeakKeyDictionaryTestCase(TestCase):
|
import copy
import gc
import random
import threading
import unittest
import torch
from torch.testing._internal.common_utils import (
find_library_location,
IS_FBCODE,
IS_MACOS,
IS_SANDCASTLE,
IS_WINDOWS,
run_tests,
TestCase,
)
from torch.utils.weak import _WeakHashRef, WeakIdKeyDictionary
class WeakKeyDictionaryTestCase(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_weak.py
|
test_keys
|
def test_keys(self):
d = self._empty_mapping()
self.assertEqual(list(d.keys()), [])
d = self.reference
self.assertIn(list(self.inmapping.keys())[0], d.keys())
self.assertNotIn(list(self.other.keys())[0], d.keys())
self.assertRaises(TypeError, d.keys, None)
|
def test_keys(self):
d = self._empty_mapping()
self.assertEqual(list(d.keys()), [])
d = self.reference
self.assertIn(next(iter(self.inmapping.keys())), d.keys())
self.assertNotIn(next(iter(self.other.keys())), d.keys())
self.assertRaises(TypeError, d.keys, None)
|
import copy
import gc
import random
import threading
import unittest
import torch
from torch.testing._internal.common_utils import run_tests, TestCase
from torch.utils.weak import WeakIdKeyDictionary
class WeakKeyDictionaryTestCase(TestCase):
|
import copy
import gc
import random
import threading
import unittest
import torch
from torch.testing._internal.common_utils import (
find_library_location,
IS_FBCODE,
IS_MACOS,
IS_SANDCASTLE,
IS_WINDOWS,
run_tests,
TestCase,
)
from torch.utils.weak import _WeakHashRef, WeakIdKeyDictionary
class WeakKeyDictionaryTestCase(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_weak.py
|
test_getitem
|
def test_getitem(self):
d = self.reference
self.assertEqual(
d[list(self.inmapping.keys())[0]], list(self.inmapping.values())[0]
)
self.assertRaises(TypeError, d.__getitem__)
|
def test_getitem(self):
d = self.reference
self.assertEqual(
d[next(iter(self.inmapping.keys()))], next(iter(self.inmapping.values()))
)
self.assertRaises(TypeError, d.__getitem__)
|
import copy
import gc
import random
import threading
import unittest
import torch
from torch.testing._internal.common_utils import run_tests, TestCase
from torch.utils.weak import WeakIdKeyDictionary
class WeakKeyDictionaryTestCase(TestCase):
|
import copy
import gc
import random
import threading
import unittest
import torch
from torch.testing._internal.common_utils import (
find_library_location,
IS_FBCODE,
IS_MACOS,
IS_SANDCASTLE,
IS_WINDOWS,
run_tests,
TestCase,
)
from torch.utils.weak import _WeakHashRef, WeakIdKeyDictionary
class WeakKeyDictionaryTestCase(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_weak.py
|
test_update
|
def test_update(self):
# mapping argument
d = self._empty_mapping()
d.update(self.other)
self.assertEqual(list(d.items()), list(self.other.items()))
# No argument
d = self._empty_mapping()
d.update()
self.assertEqual(d, self._empty_mapping())
# item sequence
d = self._empty_mapping()
d.update(self.other.items())
self.assertEqual(list(d.items()), list(self.other.items()))
# Iterator
d = self._empty_mapping()
d.update(self.other.items())
self.assertEqual(list(d.items()), list(self.other.items()))
# FIXME: Doesn't work with UserDict
# self.assertRaises((TypeError, AttributeError), d.update, None)
self.assertRaises((TypeError, AttributeError), d.update, 42)
outerself = self
class SimpleUserDict:
def __init__(self):
self.d = outerself.reference
def keys(self):
return self.d.keys()
def __getitem__(self, i):
return self.d[i]
d.clear()
d.update(SimpleUserDict())
i1 = sorted((id(k), v) for k, v in d.items())
i2 = sorted((id(k), v) for k, v in self.reference.items())
self.assertEqual(i1, i2)
class Exc(Exception):
pass
d = self._empty_mapping()
class FailingUserDict:
def keys(self):
raise Exc
self.assertRaises(Exc, d.update, FailingUserDict())
d.clear()
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self):
self.i = 1
def __iter__(self):
return self
def __next__(self):
if self.i:
self.i = 0
return "a"
raise Exc
return BogonIter()
def __getitem__(self, key):
return key
self.assertRaises(Exc, d.update, FailingUserDict())
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self):
self.i = ord("a")
def __iter__(self):
return self
def __next__(self):
if self.i <= ord("z"):
rtn = chr(self.i)
self.i += 1
return rtn
raise StopIteration
return BogonIter()
def __getitem__(self, key):
raise Exc
self.assertRaises(Exc, d.update, FailingUserDict())
d = self._empty_mapping()
class badseq:
def __iter__(self):
return self
def __next__(self):
raise Exc()
self.assertRaises(Exc, d.update, badseq())
self.assertRaises(ValueError, d.update, [(1, 2, 3)])
# no test_fromkeys or test_copy as both os.environ and selves don't support it
|
def test_update(self):
# mapping argument
d = self._empty_mapping()
d.update(self.other)
self.assertEqual(list(d.items()), list(self.other.items()))
# No argument
d = self._empty_mapping()
d.update()
self.assertEqual(d, self._empty_mapping())
# item sequence
d = self._empty_mapping()
d.update(self.other.items())
self.assertEqual(list(d.items()), list(self.other.items()))
# Iterator
d = self._empty_mapping()
d.update(self.other.items())
self.assertEqual(list(d.items()), list(self.other.items()))
# FIXME: Doesn't work with UserDict
# self.assertRaises((TypeError, AttributeError), d.update, None)
self.assertRaises((TypeError, AttributeError), d.update, 42)
outerself = self
class SimpleUserDict:
def __init__(self) -> None:
self.d = outerself.reference
def keys(self):
return self.d.keys()
def __getitem__(self, i):
return self.d[i]
d.clear()
d.update(SimpleUserDict())
i1 = sorted((id(k), v) for k, v in d.items())
i2 = sorted((id(k), v) for k, v in self.reference.items())
self.assertEqual(i1, i2)
class Exc(Exception):
pass
d = self._empty_mapping()
class FailingUserDict:
def keys(self):
raise Exc
self.assertRaises(Exc, d.update, FailingUserDict())
d.clear()
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self) -> None:
self.i = 1
def __iter__(self):
return self
def __next__(self):
if self.i:
self.i = 0
return "a"
raise Exc
return BogonIter()
def __getitem__(self, key):
return key
self.assertRaises(Exc, d.update, FailingUserDict())
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self) -> None:
self.i = ord("a")
def __iter__(self):
return self
def __next__(self):
if self.i <= ord("z"):
rtn = chr(self.i)
self.i += 1
return rtn
raise StopIteration
return BogonIter()
def __getitem__(self, key):
raise Exc
self.assertRaises(Exc, d.update, FailingUserDict())
d = self._empty_mapping()
class badseq:
def __iter__(self):
return self
def __next__(self):
raise Exc
self.assertRaises(Exc, d.update, badseq())
self.assertRaises(ValueError, d.update, [(1, 2, 3)])
# no test_fromkeys or test_copy as both os.environ and selves don't support it
|
import copy
import gc
import random
import threading
import unittest
import torch
from torch.testing._internal.common_utils import run_tests, TestCase
from torch.utils.weak import WeakIdKeyDictionary
class WeakKeyDictionaryTestCase(TestCase):
|
import copy
import gc
import random
import threading
import unittest
import torch
from torch.testing._internal.common_utils import (
find_library_location,
IS_FBCODE,
IS_MACOS,
IS_SANDCASTLE,
IS_WINDOWS,
run_tests,
TestCase,
)
from torch.utils.weak import _WeakHashRef, WeakIdKeyDictionary
class WeakKeyDictionaryTestCase(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_weak.py
|
test_get
|
def test_get(self):
d = self._empty_mapping()
self.assertTrue(d.get(list(self.other.keys())[0]) is None)
self.assertEqual(d.get(list(self.other.keys())[0], 3), 3)
d = self.reference
self.assertTrue(d.get(list(self.other.keys())[0]) is None)
self.assertEqual(d.get(list(self.other.keys())[0], 3), 3)
self.assertEqual(
d.get(list(self.inmapping.keys())[0]), list(self.inmapping.values())[0]
)
self.assertEqual(
d.get(list(self.inmapping.keys())[0], 3), list(self.inmapping.values())[0]
)
self.assertRaises(TypeError, d.get)
self.assertRaises(TypeError, d.get, None, None, None)
|
def test_get(self):
d = self._empty_mapping()
self.assertTrue(d.get(next(iter(self.other.keys()))) is None)
self.assertEqual(d.get(next(iter(self.other.keys())), 3), 3)
d = self.reference
self.assertTrue(d.get(next(iter(self.other.keys()))) is None)
self.assertEqual(d.get(next(iter(self.other.keys())), 3), 3)
self.assertEqual(
d.get(next(iter(self.inmapping.keys()))),
next(iter(self.inmapping.values())),
)
self.assertEqual(
d.get(next(iter(self.inmapping.keys())), 3),
next(iter(self.inmapping.values())),
)
self.assertRaises(TypeError, d.get)
self.assertRaises(TypeError, d.get, None, None, None)
|
import copy
import gc
import random
import threading
import unittest
import torch
from torch.testing._internal.common_utils import run_tests, TestCase
from torch.utils.weak import WeakIdKeyDictionary
class WeakKeyDictionaryTestCase(TestCase):
|
import copy
import gc
import random
import threading
import unittest
import torch
from torch.testing._internal.common_utils import (
find_library_location,
IS_FBCODE,
IS_MACOS,
IS_SANDCASTLE,
IS_WINDOWS,
run_tests,
TestCase,
)
from torch.utils.weak import _WeakHashRef, WeakIdKeyDictionary
class WeakKeyDictionaryTestCase(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_weak.py
|
test_pop
|
def test_pop(self):
d = self._empty_mapping()
k, v = list(self.inmapping.items())[0]
d[k] = v
self.assertRaises(KeyError, d.pop, list(self.other.keys())[0])
self.assertEqual(d.pop(k), v)
self.assertEqual(len(d), 0)
self.assertRaises(KeyError, d.pop, k)
|
def test_pop(self):
d = self._empty_mapping()
k, v = next(iter(self.inmapping.items()))
d[k] = v
self.assertRaises(KeyError, d.pop, next(iter(self.other.keys())))
self.assertEqual(d.pop(k), v)
self.assertEqual(len(d), 0)
self.assertRaises(KeyError, d.pop, k)
# Adapted from cpython/Lib/test/mapping_tests.py
|
import copy
import gc
import random
import threading
import unittest
import torch
from torch.testing._internal.common_utils import run_tests, TestCase
from torch.utils.weak import WeakIdKeyDictionary
class WeakKeyDictionaryTestCase(TestCase):
|
import copy
import gc
import random
import threading
import unittest
import torch
from torch.testing._internal.common_utils import (
find_library_location,
IS_FBCODE,
IS_MACOS,
IS_SANDCASTLE,
IS_WINDOWS,
run_tests,
TestCase,
)
from torch.utils.weak import _WeakHashRef, WeakIdKeyDictionary
class WeakKeyDictionaryTestCase(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_xpu.py
|
test_out_of_memory
|
def test_out_of_memory(self):
tensor = torch.zeros(1024, device="xpu")
with self.assertRaisesRegex(RuntimeError, "Tried to allocate 800000000.00 GiB"):
torch.empty(1024 * 1024 * 1024 * 800000000, dtype=torch.int8, device="xpu")
with self.assertRaisesRegex(RuntimeError, "XPU out of memory."):
torch.empty(1024 * 1024 * 1024 * 8000000000, dtype=torch.int8, device="xpu")
|
import subprocess
import sys
import tempfile
import unittest
import torch
import torch.xpu._gpu_trace as gpu_trace
from torch.testing._internal.autocast_test_lists import AutocastTestLists, TestAutocast
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyXPU,
OpDTypes,
ops,
)
from torch.testing._internal.common_methods_invocations import ops_and_refs
from torch.testing._internal.common_utils import (
NoTest,
run_tests,
suppress_warnings,
TEST_WITH_UBSAN,
TEST_XPU,
TestCase,
)
from torch.utils.checkpoint import checkpoint_sequential
TEST_MULTIXPU = torch.xpu.device_count() > 1
cpu_device = torch.device("cpu")
xpu_device = torch.device("xpu")
any_common_cpu_xpu_one = OpDTypes.any_common_cpu_cuda_one
_xpu_computation_op_list = [
"fill",
"zeros",
"zeros_like",
"clone",
"view_as_real",
"view_as_complex",
"view",
"resize_",
"resize_as_",
"add",
"sub",
"mul",
"div",
"abs",
]
_xpu_tensor_factory_op_list = [
"as_strided",
"empty",
"empty_strided",
]
_xpu_not_test_dtype_op_list = [
"resize_", # Skipped by CPU
"resize_as_", # Skipped by CPU
"abs", # Not aligned dtype
]
_xpu_all_op_list = _xpu_computation_op_list + _xpu_tensor_factory_op_list
_xpu_all_ops = [op for op in ops_and_refs if op.name in _xpu_all_op_list]
_xpu_computation_ops = [
op for op in ops_and_refs if op.name in _xpu_computation_op_list
]
class TestXpu(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_xpu.py
|
test_raises_oom
|
def test_raises_oom(self):
torch.xpu.memory.empty_cache()
with self.assertRaises(torch.OutOfMemoryError):
torch.empty(1024 * 1024 * 1024 * 1024, device="xpu")
|
import subprocess
import sys
import tempfile
import unittest
import torch
import torch.xpu._gpu_trace as gpu_trace
from torch.testing._internal.autocast_test_lists import AutocastTestLists, TestAutocast
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyXPU,
OpDTypes,
ops,
)
from torch.testing._internal.common_methods_invocations import ops_and_refs
from torch.testing._internal.common_utils import (
NoTest,
run_tests,
suppress_warnings,
TEST_WITH_UBSAN,
TEST_XPU,
TestCase,
)
from torch.utils.checkpoint import checkpoint_sequential
TEST_MULTIXPU = torch.xpu.device_count() > 1
cpu_device = torch.device("cpu")
xpu_device = torch.device("xpu")
any_common_cpu_xpu_one = OpDTypes.any_common_cpu_cuda_one
_xpu_computation_op_list = [
"fill",
"zeros",
"zeros_like",
"clone",
"view_as_real",
"view_as_complex",
"view",
"resize_",
"resize_as_",
"add",
"sub",
"mul",
"div",
"abs",
]
_xpu_tensor_factory_op_list = [
"as_strided",
"empty",
"empty_strided",
]
_xpu_not_test_dtype_op_list = [
"resize_", # Skipped by CPU
"resize_as_", # Skipped by CPU
"abs", # Not aligned dtype
]
_xpu_all_op_list = _xpu_computation_op_list + _xpu_tensor_factory_op_list
_xpu_all_ops = [op for op in ops_and_refs if op.name in _xpu_all_op_list]
_xpu_computation_ops = [
op for op in ops_and_refs if op.name in _xpu_computation_op_list
]
class TestXpu(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_xpu.py
|
test_device_memory_allocated
|
def test_device_memory_allocated(self):
device_count = torch.xpu.device_count()
current_alloc = [torch.xpu.memory_allocated(idx) for idx in range(device_count)]
x = torch.ones(10, device="xpu:0")
self.assertGreater(torch.xpu.memory_allocated(0), current_alloc[0])
self.assertTrue(
all(
torch.xpu.memory_allocated(idx) == current_alloc[idx]
for idx in range(1, device_count)
)
)
|
import subprocess
import sys
import tempfile
import unittest
import torch
import torch.xpu._gpu_trace as gpu_trace
from torch.testing._internal.autocast_test_lists import AutocastTestLists, TestAutocast
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyXPU,
OpDTypes,
ops,
)
from torch.testing._internal.common_methods_invocations import ops_and_refs
from torch.testing._internal.common_utils import (
NoTest,
run_tests,
suppress_warnings,
TEST_WITH_UBSAN,
TEST_XPU,
TestCase,
)
from torch.utils.checkpoint import checkpoint_sequential
TEST_MULTIXPU = torch.xpu.device_count() > 1
cpu_device = torch.device("cpu")
xpu_device = torch.device("xpu")
any_common_cpu_xpu_one = OpDTypes.any_common_cpu_cuda_one
_xpu_computation_op_list = [
"fill",
"zeros",
"zeros_like",
"clone",
"view_as_real",
"view_as_complex",
"view",
"resize_",
"resize_as_",
"add",
"sub",
"mul",
"div",
"abs",
]
_xpu_tensor_factory_op_list = [
"as_strided",
"empty",
"empty_strided",
]
_xpu_not_test_dtype_op_list = [
"resize_", # Skipped by CPU
"resize_as_", # Skipped by CPU
"abs", # Not aligned dtype
]
_xpu_all_op_list = _xpu_computation_op_list + _xpu_tensor_factory_op_list
_xpu_all_ops = [op for op in ops_and_refs if op.name in _xpu_all_op_list]
_xpu_computation_ops = [
op for op in ops_and_refs if op.name in _xpu_computation_op_list
]
class TestXpu(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_xpu.py
|
setUp
|
def setUp(self):
super().setUp()
self.autocast_lists = AutocastTestLists(torch.device("xpu"))
|
import subprocess
import sys
import tempfile
import unittest
import torch
import torch.xpu._gpu_trace as gpu_trace
from torch.testing._internal.autocast_test_lists import AutocastTestLists, TestAutocast
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyXPU,
OpDTypes,
ops,
)
from torch.testing._internal.common_methods_invocations import ops_and_refs
from torch.testing._internal.common_utils import (
NoTest,
run_tests,
suppress_warnings,
TEST_WITH_UBSAN,
TEST_XPU,
TestCase,
)
from torch.utils.checkpoint import checkpoint_sequential
TEST_MULTIXPU = torch.xpu.device_count() > 1
cpu_device = torch.device("cpu")
xpu_device = torch.device("xpu")
any_common_cpu_xpu_one = OpDTypes.any_common_cpu_cuda_one
_xpu_computation_op_list = [
"fill",
"zeros",
"zeros_like",
"clone",
"view_as_real",
"view_as_complex",
"view",
"resize_",
"resize_as_",
"add",
"sub",
"mul",
"div",
"abs",
]
_xpu_tensor_factory_op_list = [
"as_strided",
"empty",
"empty_strided",
]
_xpu_not_test_dtype_op_list = [
"resize_", # Skipped by CPU
"resize_as_", # Skipped by CPU
"abs", # Not aligned dtype
]
_xpu_all_op_list = _xpu_computation_op_list + _xpu_tensor_factory_op_list
_xpu_all_ops = [op for op in ops_and_refs if op.name in _xpu_all_op_list]
_xpu_computation_ops = [
op for op in ops_and_refs if op.name in _xpu_computation_op_list
]
class TestXpuAutocast(TestAutocast):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_xpu.py
|
tearDown
|
def tearDown(self):
del self.autocast_lists
super().tearDown()
|
import subprocess
import sys
import tempfile
import unittest
import torch
import torch.xpu._gpu_trace as gpu_trace
from torch.testing._internal.autocast_test_lists import AutocastTestLists, TestAutocast
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyXPU,
OpDTypes,
ops,
)
from torch.testing._internal.common_methods_invocations import ops_and_refs
from torch.testing._internal.common_utils import (
NoTest,
run_tests,
suppress_warnings,
TEST_WITH_UBSAN,
TEST_XPU,
TestCase,
)
from torch.utils.checkpoint import checkpoint_sequential
TEST_MULTIXPU = torch.xpu.device_count() > 1
cpu_device = torch.device("cpu")
xpu_device = torch.device("xpu")
any_common_cpu_xpu_one = OpDTypes.any_common_cpu_cuda_one
_xpu_computation_op_list = [
"fill",
"zeros",
"zeros_like",
"clone",
"view_as_real",
"view_as_complex",
"view",
"resize_",
"resize_as_",
"add",
"sub",
"mul",
"div",
"abs",
]
_xpu_tensor_factory_op_list = [
"as_strided",
"empty",
"empty_strided",
]
_xpu_not_test_dtype_op_list = [
"resize_", # Skipped by CPU
"resize_as_", # Skipped by CPU
"abs", # Not aligned dtype
]
_xpu_all_op_list = _xpu_computation_op_list + _xpu_tensor_factory_op_list
_xpu_all_ops = [op for op in ops_and_refs if op.name in _xpu_all_op_list]
_xpu_computation_ops = [
op for op in ops_and_refs if op.name in _xpu_computation_op_list
]
class TestXpuAutocast(TestAutocast):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_xpu.py
|
test_autocast_torch_fp16
|
def test_autocast_torch_fp16(self):
for op_with_args in self.autocast_lists.torch_fp16:
skip_test = False
op, args = op_with_args[0], op_with_args[1]
if op in self.skip_list:
skip_test = True # skip unimplemented op
if len(op_with_args) == 3:
skip_test = True # skip cudnn op
if not skip_test:
self._run_autocast_outofplace(
op, args, torch.float16, device="xpu", amp_dtype=torch.float16
)
|
import subprocess
import sys
import tempfile
import unittest
import torch
import torch.xpu._gpu_trace as gpu_trace
from torch.testing._internal.autocast_test_lists import AutocastTestLists, TestAutocast
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyXPU,
OpDTypes,
ops,
)
from torch.testing._internal.common_methods_invocations import ops_and_refs
from torch.testing._internal.common_utils import (
NoTest,
run_tests,
suppress_warnings,
TEST_WITH_UBSAN,
TEST_XPU,
TestCase,
)
from torch.utils.checkpoint import checkpoint_sequential
TEST_MULTIXPU = torch.xpu.device_count() > 1
cpu_device = torch.device("cpu")
xpu_device = torch.device("xpu")
any_common_cpu_xpu_one = OpDTypes.any_common_cpu_cuda_one
_xpu_computation_op_list = [
"fill",
"zeros",
"zeros_like",
"clone",
"view_as_real",
"view_as_complex",
"view",
"resize_",
"resize_as_",
"add",
"sub",
"mul",
"div",
"abs",
]
_xpu_tensor_factory_op_list = [
"as_strided",
"empty",
"empty_strided",
]
_xpu_not_test_dtype_op_list = [
"resize_", # Skipped by CPU
"resize_as_", # Skipped by CPU
"abs", # Not aligned dtype
]
_xpu_all_op_list = _xpu_computation_op_list + _xpu_tensor_factory_op_list
_xpu_all_ops = [op for op in ops_and_refs if op.name in _xpu_all_op_list]
_xpu_computation_ops = [
op for op in ops_and_refs if op.name in _xpu_computation_op_list
]
class TestXpuAutocast(TestAutocast):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_xpu.py
|
test_autocast_torch_bf16
|
def test_autocast_torch_bf16(self):
for op_with_args in self.autocast_lists.torch_fp16:
skip_test = False
op, args = op_with_args[0], op_with_args[1]
if op in self.skip_list:
skip_test = True # skip unimplemented op
if len(op_with_args) == 3:
skip_test = True # skip cudnn op
if not skip_test:
self._run_autocast_outofplace(op, args, torch.bfloat16, device="xpu")
|
import subprocess
import sys
import tempfile
import unittest
import torch
import torch.xpu._gpu_trace as gpu_trace
from torch.testing._internal.autocast_test_lists import AutocastTestLists, TestAutocast
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyXPU,
OpDTypes,
ops,
)
from torch.testing._internal.common_methods_invocations import ops_and_refs
from torch.testing._internal.common_utils import (
NoTest,
run_tests,
suppress_warnings,
TEST_WITH_UBSAN,
TEST_XPU,
TestCase,
)
from torch.utils.checkpoint import checkpoint_sequential
TEST_MULTIXPU = torch.xpu.device_count() > 1
cpu_device = torch.device("cpu")
xpu_device = torch.device("xpu")
any_common_cpu_xpu_one = OpDTypes.any_common_cpu_cuda_one
_xpu_computation_op_list = [
"fill",
"zeros",
"zeros_like",
"clone",
"view_as_real",
"view_as_complex",
"view",
"resize_",
"resize_as_",
"add",
"sub",
"mul",
"div",
"abs",
]
_xpu_tensor_factory_op_list = [
"as_strided",
"empty",
"empty_strided",
]
_xpu_not_test_dtype_op_list = [
"resize_", # Skipped by CPU
"resize_as_", # Skipped by CPU
"abs", # Not aligned dtype
]
_xpu_all_op_list = _xpu_computation_op_list + _xpu_tensor_factory_op_list
_xpu_all_ops = [op for op in ops_and_refs if op.name in _xpu_all_op_list]
_xpu_computation_ops = [
op for op in ops_and_refs if op.name in _xpu_computation_op_list
]
class TestXpuAutocast(TestAutocast):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_xpu.py
|
test_autocast_torch_need_autocast_promote
|
def test_autocast_torch_need_autocast_promote(self):
for op, args in self.autocast_lists.torch_need_autocast_promote:
self._run_autocast_outofplace(
op, args, torch.float32, device="xpu", amp_dtype=torch.float16
)
|
import subprocess
import sys
import tempfile
import unittest
import torch
import torch.xpu._gpu_trace as gpu_trace
from torch.testing._internal.autocast_test_lists import AutocastTestLists, TestAutocast
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyXPU,
OpDTypes,
ops,
)
from torch.testing._internal.common_methods_invocations import ops_and_refs
from torch.testing._internal.common_utils import (
NoTest,
run_tests,
suppress_warnings,
TEST_WITH_UBSAN,
TEST_XPU,
TestCase,
)
from torch.utils.checkpoint import checkpoint_sequential
TEST_MULTIXPU = torch.xpu.device_count() > 1
cpu_device = torch.device("cpu")
xpu_device = torch.device("xpu")
any_common_cpu_xpu_one = OpDTypes.any_common_cpu_cuda_one
_xpu_computation_op_list = [
"fill",
"zeros",
"zeros_like",
"clone",
"view_as_real",
"view_as_complex",
"view",
"resize_",
"resize_as_",
"add",
"sub",
"mul",
"div",
"abs",
]
_xpu_tensor_factory_op_list = [
"as_strided",
"empty",
"empty_strided",
]
_xpu_not_test_dtype_op_list = [
"resize_", # Skipped by CPU
"resize_as_", # Skipped by CPU
"abs", # Not aligned dtype
]
_xpu_all_op_list = _xpu_computation_op_list + _xpu_tensor_factory_op_list
_xpu_all_ops = [op for op in ops_and_refs if op.name in _xpu_all_op_list]
_xpu_computation_ops = [
op for op in ops_and_refs if op.name in _xpu_computation_op_list
]
class TestXpuAutocast(TestAutocast):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_xpu.py
|
test_autocast_torch_expect_builtin_promote
|
def test_autocast_torch_expect_builtin_promote(self):
for op, args, out_type in self.autocast_lists.torch_expect_builtin_promote:
self._run_autocast_outofplace(
op,
args,
torch.float32,
device="xpu",
out_type=out_type,
amp_dtype=torch.float16,
)
|
import subprocess
import sys
import tempfile
import unittest
import torch
import torch.xpu._gpu_trace as gpu_trace
from torch.testing._internal.autocast_test_lists import AutocastTestLists, TestAutocast
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyXPU,
OpDTypes,
ops,
)
from torch.testing._internal.common_methods_invocations import ops_and_refs
from torch.testing._internal.common_utils import (
NoTest,
run_tests,
suppress_warnings,
TEST_WITH_UBSAN,
TEST_XPU,
TestCase,
)
from torch.utils.checkpoint import checkpoint_sequential
TEST_MULTIXPU = torch.xpu.device_count() > 1
cpu_device = torch.device("cpu")
xpu_device = torch.device("xpu")
any_common_cpu_xpu_one = OpDTypes.any_common_cpu_cuda_one
_xpu_computation_op_list = [
"fill",
"zeros",
"zeros_like",
"clone",
"view_as_real",
"view_as_complex",
"view",
"resize_",
"resize_as_",
"add",
"sub",
"mul",
"div",
"abs",
]
_xpu_tensor_factory_op_list = [
"as_strided",
"empty",
"empty_strided",
]
_xpu_not_test_dtype_op_list = [
"resize_", # Skipped by CPU
"resize_as_", # Skipped by CPU
"abs", # Not aligned dtype
]
_xpu_all_op_list = _xpu_computation_op_list + _xpu_tensor_factory_op_list
_xpu_all_ops = [op for op in ops_and_refs if op.name in _xpu_all_op_list]
_xpu_computation_ops = [
op for op in ops_and_refs if op.name in _xpu_computation_op_list
]
class TestXpuAutocast(TestAutocast):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_weak.py
|
setUp
|
def setUp(self):
if IS_MACOS:
raise unittest.SkipTest("non-portable load_library call used in test")
|
import copy
import gc
import random
import threading
import unittest
import torch
from torch.testing._internal.common_utils import (
find_library_location,
IS_FBCODE,
IS_MACOS,
IS_SANDCASTLE,
IS_WINDOWS,
run_tests,
TestCase,
)
from torch.utils.weak import _WeakHashRef, WeakIdKeyDictionary
class WeakKeyDictionaryScriptObjectTestCase(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_xpu.py
|
test_stream_priority
|
def test_stream_priority(self):
low, high = torch.xpu.Stream.priority_range()
s0 = torch.xpu.Stream(device=0, priority=low)
self.assertEqual(low, s0.priority)
self.assertEqual(torch.device("xpu:0"), s0.device)
s1 = torch.xpu.Stream(device=0, priority=high)
self.assertEqual(high, s1.priority)
self.assertEqual(torch.device("xpu:0"), s1.device)
|
import subprocess
import sys
import tempfile
import unittest
import torch
import torch.xpu._gpu_trace as gpu_trace
from torch.testing._internal.autocast_test_lists import AutocastTestLists, TestAutocast
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
onlyXPU,
OpDTypes,
ops,
)
from torch.testing._internal.common_methods_invocations import ops_and_refs
from torch.testing._internal.common_utils import (
NoTest,
run_tests,
suppress_warnings,
TEST_WITH_UBSAN,
TEST_XPU,
TestCase,
)
from torch.utils.checkpoint import checkpoint_sequential
TEST_MULTIXPU = torch.xpu.device_count() > 1
cpu_device = torch.device("cpu")
xpu_device = torch.device("xpu")
any_common_cpu_xpu_one = OpDTypes.any_common_cpu_cuda_one
_xpu_computation_op_list = [
"fill",
"zeros",
"zeros_like",
"clone",
"view_as_real",
"view_as_complex",
"view",
"resize_",
"resize_as_",
"add",
"sub",
"mul",
"div",
"abs",
]
_xpu_tensor_factory_op_list = [
"as_strided",
"empty",
"empty_strided",
]
_xpu_not_test_dtype_op_list = [
"resize_", # Skipped by CPU
"resize_as_", # Skipped by CPU
"abs", # Not aligned dtype
]
_xpu_all_op_list = _xpu_computation_op_list + _xpu_tensor_factory_op_list
_xpu_all_ops = [op for op in ops_and_refs if op.name in _xpu_all_op_list]
_xpu_computation_ops = [
op for op in ops_and_refs if op.name in _xpu_computation_op_list
]
class TestXpu(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|