library (stringclasses, 1 value) | test_file (stringclasses, 785 values) | test_function (stringlengths 1-295) | before (stringlengths 0-448k) | after (stringlengths 0-487k) | context_before (stringclasses, 947 values) | context_after (stringlengths 0-16.3k) | commit_before (stringclasses, 1 value) | commit_after (stringclasses, 1 value) | change_type (stringclasses, 3 values) |
---|---|---|---|---|---|---|---|---|---|
torch
|
test/test_tensorboard.py
|
test_int_tensor_proto
|
def test_int_tensor_proto(self):
int_values = [1, 2, 3]
actual_proto = (
tensor_proto("dummy", torch.tensor(int_values, dtype=torch.int32))
.value[0]
.tensor
)
self.assertEqual(actual_proto.int_val, int_values)
self.assertTrue(actual_proto.dtype == DataType.DT_INT32)
|
import io
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import expecttest
import numpy as np
TEST_TENSORBOARD = True
import tensorboard.summary.writer.event_file_writer # noqa: F401
from tensorboard.compat.proto.summary_pb2 import Summary
HAS_TORCHVISION = True
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
TEST_MATPLOTLIB = True
import matplotlib
import matplotlib.pyplot as plt
skipIfNoMatplotlib = unittest.skipIf(not TEST_MATPLOTLIB, "no matplotlib")
import torch
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
IS_MACOS,
IS_WINDOWS,
parametrize,
run_tests,
TEST_WITH_CROSSREF,
TestCase,
skipIfTorchDynamo,
)
from google.protobuf import text_format
from PIL import Image
from tensorboard.compat.proto.graph_pb2 import GraphDef
from tensorboard.compat.proto.types_pb2 import DataType
from torch.utils.tensorboard import summary, SummaryWriter
from torch.utils.tensorboard._convert_np import make_np
from torch.utils.tensorboard._pytorch_graph import graph
from torch.utils.tensorboard._utils import _prepare_video, convert_to_HWC
from torch.utils.tensorboard.summary import int_to_half, tensor_proto
freqs = [262, 294, 330, 349, 392, 440, 440, 440, 440, 440, 440]
true_positive_counts = [75, 64, 21, 5, 0]
false_positive_counts = [150, 105, 18, 0, 0]
true_negative_counts = [0, 45, 132, 150, 150]
false_negative_counts = [0, 11, 54, 70, 75]
precision = [0.3333333, 0.3786982, 0.5384616, 1.0, 0.0]
recall = [1.0, 0.8533334, 0.28, 0.0666667, 0.0]
import moviepy # noqa: F401
class TestTensorProtoSummary(BaseTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_tensorboard.py
|
test_complex_tensor_proto
|
def test_complex_tensor_proto(self):
real = torch.tensor([1.0, 2.0])
imag = torch.tensor([3.0, 4.0])
actual_proto = (
tensor_proto("dummy", torch.complex(real, imag)).value[0].tensor
)
self.assertEqual(actual_proto.scomplex_val, [1.0, 3.0, 2.0, 4.0])
|
import io
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import expecttest
import numpy as np
TEST_TENSORBOARD = True
import tensorboard.summary.writer.event_file_writer # noqa: F401
from tensorboard.compat.proto.summary_pb2 import Summary
HAS_TORCHVISION = True
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
TEST_MATPLOTLIB = True
import matplotlib
import matplotlib.pyplot as plt
skipIfNoMatplotlib = unittest.skipIf(not TEST_MATPLOTLIB, "no matplotlib")
import torch
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
IS_MACOS,
IS_WINDOWS,
parametrize,
run_tests,
TEST_WITH_CROSSREF,
TestCase,
skipIfTorchDynamo,
)
from google.protobuf import text_format
from PIL import Image
from tensorboard.compat.proto.graph_pb2 import GraphDef
from tensorboard.compat.proto.types_pb2 import DataType
from torch.utils.tensorboard import summary, SummaryWriter
from torch.utils.tensorboard._convert_np import make_np
from torch.utils.tensorboard._pytorch_graph import graph
from torch.utils.tensorboard._utils import _prepare_video, convert_to_HWC
from torch.utils.tensorboard.summary import int_to_half, tensor_proto
freqs = [262, 294, 330, 349, 392, 440, 440, 440, 440, 440, 440]
true_positive_counts = [75, 64, 21, 5, 0]
false_positive_counts = [150, 105, 18, 0, 0]
true_negative_counts = [0, 45, 132, 150, 150]
false_negative_counts = [0, 11, 54, 70, 75]
precision = [0.3333333, 0.3786982, 0.5384616, 1.0, 0.0]
recall = [1.0, 0.8533334, 0.28, 0.0666667, 0.0]
import moviepy # noqa: F401
class TestTensorProtoSummary(BaseTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_tensorboard.py
|
test_empty_tensor_proto
|
if __name__ == '__main__':
run_tests()
|
def test_empty_tensor_proto(self):
actual_proto = tensor_proto("dummy", torch.empty(0)).value[0].tensor
self.assertEqual(actual_proto.float_val, [])
|
import io
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import expecttest
import numpy as np
TEST_TENSORBOARD = True
import tensorboard.summary.writer.event_file_writer # noqa: F401
from tensorboard.compat.proto.summary_pb2 import Summary
HAS_TORCHVISION = True
import torchvision
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
TEST_MATPLOTLIB = True
import matplotlib
import matplotlib.pyplot as plt
skipIfNoMatplotlib = unittest.skipIf(not TEST_MATPLOTLIB, "no matplotlib")
import torch
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
IS_MACOS,
IS_WINDOWS,
parametrize,
run_tests,
TEST_WITH_CROSSREF,
TestCase,
skipIfTorchDynamo,
)
from google.protobuf import text_format
from PIL import Image
from tensorboard.compat.proto.graph_pb2 import GraphDef
from tensorboard.compat.proto.types_pb2 import DataType
from torch.utils.tensorboard import summary, SummaryWriter
from torch.utils.tensorboard._convert_np import make_np
from torch.utils.tensorboard._pytorch_graph import graph
from torch.utils.tensorboard._utils import _prepare_video, convert_to_HWC
from torch.utils.tensorboard.summary import int_to_half, tensor_proto
freqs = [262, 294, 330, 349, 392, 440, 440, 440, 440, 440, 440]
true_positive_counts = [75, 64, 21, 5, 0]
false_positive_counts = [150, 105, 18, 0, 0]
true_negative_counts = [0, 45, 132, 150, 150]
false_negative_counts = [0, 11, 54, 70, 75]
precision = [0.3333333, 0.3786982, 0.5384616, 1.0, 0.0]
recall = [1.0, 0.8533334, 0.28, 0.0666667, 0.0]
import moviepy # noqa: F401
class TestTensorProtoSummary(BaseTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/test_tensorexpr.py
|
test_alias_analysis_module
|
def test_alias_analysis_module(self):
class AliasModule(nn.Module):
def __init__(self):
super().__init__()
torch.manual_seed(1337)
self.a = torch.randn(128, 128)
self.b = torch.randn(128, 128)
self.c = torch.randn(128, 128)
def forward(self, x, y, z):
z = z + self.a
self.b.add_(y)
w = z + self.a
z = w + x
return z
x = torch.randn(128, 128)
def getModule(script):
am = AliasModule()
if script:
return torch.jit.script(am)
return am
am = getModule(False)
am_s = getModule(True)
ref = am(x, x, x)
test = am_s(x, x, x)
torch.testing.assert_close(ref, test)
# Now do the aliasing
am.a = am.b
ref = am(x, x, x)
am_s.a = am_s.b
test = am_s(x, x, x)
torch.testing.assert_close(ref, test)
|
def test_alias_analysis_module(self):
class AliasModule(nn.Module):
def __init__(self) -> None:
super().__init__()
torch.manual_seed(1337)
self.a = torch.randn(128, 128)
self.b = torch.randn(128, 128)
self.c = torch.randn(128, 128)
def forward(self, x, y, z):
z = z + self.a
self.b.add_(y)
w = z + self.a
z = w + x
return z
x = torch.randn(128, 128)
def getModule(script):
am = AliasModule()
if script:
return torch.jit.script(am)
return am
am = getModule(False)
am_s = getModule(True)
ref = am(x, x, x)
test = am_s(x, x, x)
torch.testing.assert_close(ref, test)
# Now do the aliasing
am.a = am.b
ref = am(x, x, x)
am_s.a = am_s.b
test = am_s(x, x, x)
torch.testing.assert_close(ref, test)
|
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
import unittest
import itertools
from torch.testing._internal.common_utils import suppress_warnings, num_profiled_runs, run_tests, skipIfTorchDynamo
from torch.testing._internal.jit_utils import JitTestCase, TensorExprTestOptions
LLVM_ENABLED = torch._C._llvm_enabled()
@skipIfTorchDynamo()
class TestTensorExprFuser(BaseTestClass):
|
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
import unittest
import itertools
from torch.testing._internal.common_utils import suppress_warnings, num_profiled_runs, run_tests, skipIfTorchDynamo
from torch.testing._internal.jit_utils import JitTestCase, TensorExprTestOptions
LLVM_ENABLED = torch._C._llvm_enabled()
@skipIfTorchDynamo()
class TestTensorExprFuser(BaseTestClass):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_tensorexpr.py
|
__init__
|
def __init__(self):
super().__init__()
torch.manual_seed(1337)
self.a = torch.randn(128, 128)
self.b = torch.randn(128, 128)
self.c = torch.randn(128, 128)
|
def __init__(self) -> None:
super().__init__()
torch.manual_seed(1337)
self.a = torch.randn(128, 128)
self.b = torch.randn(128, 128)
self.c = torch.randn(128, 128)
|
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
import unittest
import itertools
from torch.testing._internal.common_utils import suppress_warnings, num_profiled_runs, run_tests, skipIfTorchDynamo
from torch.testing._internal.jit_utils import JitTestCase, TensorExprTestOptions
LLVM_ENABLED = torch._C._llvm_enabled()
class AliasModule(nn.Module):
|
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
import unittest
import itertools
from torch.testing._internal.common_utils import suppress_warnings, num_profiled_runs, run_tests, skipIfTorchDynamo
from torch.testing._internal.jit_utils import JitTestCase, TensorExprTestOptions
LLVM_ENABLED = torch._C._llvm_enabled()
class AliasModule(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_tensorexpr.py
|
test_alias_analysis_inputs
|
def test_alias_analysis_inputs(self):
class AliasModule(nn.Module):
def __init__(self):
super().__init__()
torch.manual_seed(1337)
self.a = torch.randn(128, 128)
self.b = torch.randn(128, 128)
self.c = torch.randn(128, 128)
def forward(self, x, y, z):
x.add_(y)
w = z + self.a
z = w + x
return z
def getModule(script):
am = AliasModule()
if script:
return torch.jit.script(am)
return am
am = getModule(False)
am_s = getModule(True)
torch.manual_seed(1337)
x = torch.randn(128, 128)
ref = am(x, x, x)
torch.manual_seed(1337)
x = torch.randn(128, 128)
test = am_s(x, x, x)
torch.testing.assert_close(ref, test)
|
def test_alias_analysis_inputs(self):
class AliasModule(nn.Module):
def __init__(self) -> None:
super().__init__()
torch.manual_seed(1337)
self.a = torch.randn(128, 128)
self.b = torch.randn(128, 128)
self.c = torch.randn(128, 128)
def forward(self, x, y, z):
x.add_(y)
w = z + self.a
z = w + x
return z
def getModule(script):
am = AliasModule()
if script:
return torch.jit.script(am)
return am
am = getModule(False)
am_s = getModule(True)
torch.manual_seed(1337)
x = torch.randn(128, 128)
ref = am(x, x, x)
torch.manual_seed(1337)
x = torch.randn(128, 128)
test = am_s(x, x, x)
torch.testing.assert_close(ref, test)
|
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
import unittest
import itertools
from torch.testing._internal.common_utils import suppress_warnings, num_profiled_runs, run_tests, skipIfTorchDynamo
from torch.testing._internal.jit_utils import JitTestCase, TensorExprTestOptions
LLVM_ENABLED = torch._C._llvm_enabled()
@skipIfTorchDynamo()
class TestTensorExprFuser(BaseTestClass):
|
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
import unittest
import itertools
from torch.testing._internal.common_utils import suppress_warnings, num_profiled_runs, run_tests, skipIfTorchDynamo
from torch.testing._internal.jit_utils import JitTestCase, TensorExprTestOptions
LLVM_ENABLED = torch._C._llvm_enabled()
@skipIfTorchDynamo()
class TestTensorExprFuser(BaseTestClass):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_tensorexpr.py
|
__init__
|
def __init__(self):
super().__init__()
torch.manual_seed(1337)
self.a = torch.randn(128, 128)
self.b = torch.randn(128, 128)
self.c = torch.randn(128, 128)
|
def __init__(self) -> None:
super().__init__()
torch.manual_seed(1337)
self.a = torch.randn(128, 128)
self.b = torch.randn(128, 128)
self.c = torch.randn(128, 128)
|
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
import unittest
import itertools
from torch.testing._internal.common_utils import suppress_warnings, num_profiled_runs, run_tests, skipIfTorchDynamo
from torch.testing._internal.jit_utils import JitTestCase, TensorExprTestOptions
LLVM_ENABLED = torch._C._llvm_enabled()
class AliasModule(nn.Module):
|
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
import unittest
import itertools
from torch.testing._internal.common_utils import suppress_warnings, num_profiled_runs, run_tests, skipIfTorchDynamo
from torch.testing._internal.jit_utils import JitTestCase, TensorExprTestOptions
LLVM_ENABLED = torch._C._llvm_enabled()
class AliasModule(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_tensorexpr.py
|
test_alias_analysis_input_and_module
|
def test_alias_analysis_input_and_module(self):
class AliasModule(nn.Module):
def __init__(self):
super().__init__()
torch.manual_seed(1337)
self.a = torch.randn(128, 128)
self.b = torch.randn(128, 128)
self.c = torch.randn(128, 128)
def forward(self, x, y, z):
x.add_(y)
w = z + self.b
z = w + x
return z
def getModule(script):
am = AliasModule()
if script:
return torch.jit.script(am)
return am
am = getModule(False)
am_s = getModule(True)
torch.manual_seed(1337)
x = torch.randn(128, 128)
am.b = x
ref = am(x, x, x)
torch.manual_seed(1337)
x = torch.randn(128, 128)
am_s.b = x
test = am_s(x, x, x)
torch.testing.assert_close(ref, test)
|
def test_alias_analysis_input_and_module(self):
class AliasModule(nn.Module):
def __init__(self) -> None:
super().__init__()
torch.manual_seed(1337)
self.a = torch.randn(128, 128)
self.b = torch.randn(128, 128)
self.c = torch.randn(128, 128)
def forward(self, x, y, z):
x.add_(y)
w = z + self.b
z = w + x
return z
def getModule(script):
am = AliasModule()
if script:
return torch.jit.script(am)
return am
am = getModule(False)
am_s = getModule(True)
torch.manual_seed(1337)
x = torch.randn(128, 128)
am.b = x
ref = am(x, x, x)
torch.manual_seed(1337)
x = torch.randn(128, 128)
am_s.b = x
test = am_s(x, x, x)
torch.testing.assert_close(ref, test)
|
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
import unittest
import itertools
from torch.testing._internal.common_utils import suppress_warnings, num_profiled_runs, run_tests, skipIfTorchDynamo
from torch.testing._internal.jit_utils import JitTestCase, TensorExprTestOptions
LLVM_ENABLED = torch._C._llvm_enabled()
@skipIfTorchDynamo()
class TestTensorExprFuser(BaseTestClass):
|
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
import unittest
import itertools
from torch.testing._internal.common_utils import suppress_warnings, num_profiled_runs, run_tests, skipIfTorchDynamo
from torch.testing._internal.jit_utils import JitTestCase, TensorExprTestOptions
LLVM_ENABLED = torch._C._llvm_enabled()
@skipIfTorchDynamo()
class TestTensorExprFuser(BaseTestClass):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_tensorexpr.py
|
__init__
|
def __init__(self):
super().__init__()
torch.manual_seed(1337)
self.a = torch.randn(128, 128)
self.b = torch.randn(128, 128)
self.c = torch.randn(128, 128)
|
def __init__(self) -> None:
super().__init__()
torch.manual_seed(1337)
self.a = torch.randn(128, 128)
self.b = torch.randn(128, 128)
self.c = torch.randn(128, 128)
|
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
import unittest
import itertools
from torch.testing._internal.common_utils import suppress_warnings, num_profiled_runs, run_tests, skipIfTorchDynamo
from torch.testing._internal.jit_utils import JitTestCase, TensorExprTestOptions
LLVM_ENABLED = torch._C._llvm_enabled()
class AliasModule(nn.Module):
|
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
import unittest
import itertools
from torch.testing._internal.common_utils import suppress_warnings, num_profiled_runs, run_tests, skipIfTorchDynamo
from torch.testing._internal.jit_utils import JitTestCase, TensorExprTestOptions
LLVM_ENABLED = torch._C._llvm_enabled()
class AliasModule(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_tensorexpr_pybind.py
|
test_alloc_in_loop
|
def test_alloc_in_loop(self):
a, tmp, b = [
te.BufHandle(name, [1], torch.float32) for name in ["a", "tmp", "b"]
]
body = te.Block([tmp.store([0], a.load([0])), b.store([0], tmp.load([0]))])
for _ in range(4):
i = te.VarHandle("i", torch.int32)
body = te.For.make(i, 0, 100, body)
nest = te.LoopNest(body, [b])
nest.prepare_for_codegen()
f = te.construct_codegen("llvm", nest.simplify(), [a, b])
ta, tb = [torch.ones(1) for _ in range(2)]
f.call([ta.data_ptr(), tb.data_ptr()])
|
def test_alloc_in_loop(self):
a, tmp, b = (
te.BufHandle(name, [1], torch.float32) for name in ["a", "tmp", "b"]
)
body = te.Block([tmp.store([0], a.load([0])), b.store([0], tmp.load([0]))])
for _ in range(4):
i = te.VarHandle("i", torch.int32)
body = te.For.make(i, 0, 100, body)
nest = te.LoopNest(body, [b])
nest.prepare_for_codegen()
f = te.construct_codegen("llvm", nest.simplify(), [a, b])
ta, tb = (torch.ones(1) for _ in range(2))
f.call([ta.data_ptr(), tb.data_ptr()])
|
import torch
import numpy as np
import torch._C._te as te
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.jit_utils import JitTestCase
import unittest
LLVM_ENABLED = torch._C._llvm_enabled()
class TestTensorExprPyBind(JitTestCase):
|
import torch
import numpy as np
import torch._C._te as te
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.jit_utils import JitTestCase
import unittest
LLVM_ENABLED = torch._C._llvm_enabled()
class TestTensorExprPyBind(JitTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_tensorexpr.py
|
round
|
def round(x):
return torch.round(x)
for data_type in [torch.float32, torch.double]:
a = torch.tensor([0.2, 1.6, 2.5, 3.5]).to(data_type)
traced = torch.jit.trace(round, (a))
x = warmup_and_run_forward(traced, a)
self.assertLastGraphAllFused()
y = round(x)
self.assertEqual(x, y)
|
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
import unittest
import itertools
from torch.testing._internal.common_utils import suppress_warnings, num_profiled_runs, run_tests, skipIfTorchDynamo
from torch.testing._internal.jit_utils import JitTestCase, TensorExprTestOptions
LLVM_ENABLED = torch._C._llvm_enabled()
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_tensorexpr.py
|
test_scalar
|
def test_scalar(self):
@torch.jit.script
def test_float(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, a: float, b: float) -> torch.Tensor:
return torch.add(torch.add(x, y, alpha=a), z, alpha=b)
@torch.jit.script
def test_int(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, a: int, b: int) -> torch.Tensor:
return torch.add(torch.add(x, y, alpha=a), z, alpha=b)
for test in (test_float, test_int):
for data_type in self.dtypes:
x, y, z = [torch.rand(4, dtype=data_type) for i in range(3)]
a, b = 1, 2
test(x, y, z, a, b)
r = test(x, y, z, a, b)
self.assertEqual(r, x + y * a + z * b)
|
def test_scalar(self):
@torch.jit.script
def test_float(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, a: float, b: float) -> torch.Tensor:
return torch.add(torch.add(x, y, alpha=a), z, alpha=b)
@torch.jit.script
def test_int(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, a: int, b: int) -> torch.Tensor:
return torch.add(torch.add(x, y, alpha=a), z, alpha=b)
for test in (test_float, test_int):
for data_type in self.dtypes:
x, y, z = (torch.rand(4, dtype=data_type) for i in range(3))
a, b = 1, 2
test(x, y, z, a, b)
r = test(x, y, z, a, b)
self.assertEqual(r, x + y * a + z * b)
|
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
import unittest
import itertools
from torch.testing._internal.common_utils import suppress_warnings, num_profiled_runs, run_tests, skipIfTorchDynamo
from torch.testing._internal.jit_utils import JitTestCase, TensorExprTestOptions
LLVM_ENABLED = torch._C._llvm_enabled()
@skipIfTorchDynamo()
class TestTensorExprFuser(BaseTestClass):
|
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
import unittest
import itertools
from torch.testing._internal.common_utils import suppress_warnings, num_profiled_runs, run_tests, skipIfTorchDynamo
from torch.testing._internal.jit_utils import JitTestCase, TensorExprTestOptions
LLVM_ENABLED = torch._C._llvm_enabled()
@skipIfTorchDynamo()
class TestTensorExprFuser(BaseTestClass):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_testing.py
|
check
|
def check(size, low, high, requires_grad, noncontiguous):
if dtype not in [torch.float, torch.cfloat]:
requires_grad = False
t = make_tensor(size, dtype=dtype, device=device, low=low, high=high,
requires_grad=requires_grad, noncontiguous=noncontiguous)
self.assertEqual(t.shape, size)
self.assertEqual(t.device, torch.device(device))
self.assertEqual(t.dtype, dtype)
low = -9 if low is None else low
high = 9 if high is None else high
if t.numel() > 0 and dtype in [torch.long, torch.float]:
self.assertTrue(t.le(high).logical_and(t.ge(low)).all().item())
self.assertEqual(t.requires_grad, requires_grad)
if t.numel() > 1:
self.assertEqual(t.is_contiguous(), not noncontiguous)
else:
self.assertTrue(t.is_contiguous())
for size in (tuple(), (0,), (1,), (1, 1), (2,), (2, 3), (8, 16, 32)):
check(size, None, None, False, False)
check(size, 2, 4, True, True)
|
import collections
import doctest
import functools
import importlib
import inspect
import itertools
import math
import os
import re
import subprocess
import sys
import unittest.mock
from typing import Any, Callable, Iterator, List, Tuple, Generator
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import \
(IS_FBCODE, IS_MACOS, IS_SANDCASTLE, IS_WINDOWS, TestCase, run_tests, skipIfRocm, slowTest,
parametrize, subtest, instantiate_parametrized_tests, dtype_name, TEST_WITH_ROCM)
from torch.testing._internal.common_device_type import \
(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, dtypes,
get_device_type_test_bases, instantiate_device_type_tests, onlyCUDA, onlyNativeDeviceTypes,
deviceCountAtLeast, ops, expectedFailureMeta, OpDTypes)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal import opinfo
from torch.testing._internal.common_dtype import all_types_and_complex_and, floating_types
from torch.testing._internal.common_modules import modules, module_db, ModuleInfo
from torch.testing._internal.opinfo.core import SampleInput, DecorateInfo, OpInfo
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_testing.py
|
get_dtype_limits
|
def get_dtype_limits(dtype):
if dtype is torch.bool:
return 0, 1
info = (torch.finfo if dtype.is_floating_point or dtype.is_complex else torch.iinfo)(dtype)
# We are using integer bounds here, because otherwise it would be impossible to pass `low` and `high`
# outside their valid range. Python uses 64bit floating point numbers and thus trying to do something like
# `torch.finfo(torch.float64).max * 2` will always result in `inf`. On the flip side, Python's `int` is
# unbounded.
return int(info.min), int(info.max)
lowest_inclusive, highest_inclusive = get_dtype_limits(dtype)
with self.assertRaisesRegex(ValueError, ""):
low, high = (-2, -1) if lowest_inclusive == 0 else (lowest_inclusive * 4, lowest_inclusive * 2)
make_tensor(low=low, high=high)
with self.assertRaisesRegex(ValueError, ""):
make_tensor(low=highest_inclusive * 2, high=highest_inclusive * 4)
|
import collections
import doctest
import functools
import importlib
import inspect
import itertools
import math
import os
import re
import subprocess
import sys
import unittest.mock
from typing import Any, Callable, Iterator, List, Tuple
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import \
(IS_FBCODE, IS_JETSON, IS_MACOS, IS_SANDCASTLE, IS_WINDOWS, TestCase, run_tests, slowTest,
parametrize, subtest, instantiate_parametrized_tests, dtype_name, TEST_WITH_ROCM, decorateIf)
from torch.testing._internal.common_device_type import \
(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, dtypes,
get_device_type_test_bases, instantiate_device_type_tests, onlyCPU, onlyCUDA, onlyNativeDeviceTypes,
deviceCountAtLeast, ops, expectedFailureMeta, OpDTypes)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal import opinfo
from torch.testing._internal.common_dtype import all_types_and_complex_and, floating_types
from torch.testing._internal.common_modules import modules, module_db, ModuleInfo
from torch.testing._internal.opinfo.core import SampleInput, DecorateInfo, OpInfo
import operator
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_testing.py
|
test_low_high_boolean_integral2
|
def test_low_high_boolean_integral2(self, dtype, device):
shape = (10_000,)
if dtype is torch.bool:
low = 1
elif dtype is torch.int64:
# Due to its internals, `make_tensor` is not able to sample `torch.iinfo(torch.int64).max`
low = torch.iinfo(dtype).max - 1
else:
low = torch.iinfo(dtype).max
high = low + 1
actual = torch.testing.make_tensor(shape, dtype=dtype, device=device, low=low, high=high)
expected = torch.full(shape, low, dtype=dtype, device=device)
torch.testing.assert_close(actual, expected)
|
import collections
import doctest
import functools
import importlib
import inspect
import itertools
import math
import os
import re
import subprocess
import sys
import unittest.mock
from typing import Any, Callable, Iterator, List, Tuple
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import \
(IS_FBCODE, IS_JETSON, IS_MACOS, IS_SANDCASTLE, IS_WINDOWS, TestCase, run_tests, slowTest,
parametrize, subtest, instantiate_parametrized_tests, dtype_name, TEST_WITH_ROCM, decorateIf)
from torch.testing._internal.common_device_type import \
(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, dtypes,
get_device_type_test_bases, instantiate_device_type_tests, onlyCPU, onlyCUDA, onlyNativeDeviceTypes,
deviceCountAtLeast, ops, expectedFailureMeta, OpDTypes)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal import opinfo
from torch.testing._internal.common_dtype import all_types_and_complex_and, floating_types
from torch.testing._internal.common_modules import modules, module_db, ModuleInfo
from torch.testing._internal.opinfo.core import SampleInput, DecorateInfo, OpInfo
import operator
class TestMakeTensor(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_testing.py
|
_get_test_names_for_test_class
|
def _get_test_names_for_test_class(test_cls):
""" Convenience function to get all test names for a given test class. """
test_names = ['{}.{}'.format(test_cls.__name__, key) for key in test_cls.__dict__
if key.startswith('test_')]
return sorted(test_names)
|
def _get_test_names_for_test_class(test_cls):
""" Convenience function to get all test names for a given test class. """
test_names = [f'{test_cls.__name__}.{key}' for key in test_cls.__dict__
if key.startswith('test_')]
return sorted(test_names)
|
import collections
import doctest
import functools
import importlib
import inspect
import itertools
import math
import os
import re
import subprocess
import sys
import unittest.mock
from typing import Any, Callable, Iterator, List, Tuple, Generator
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import \
(IS_FBCODE, IS_MACOS, IS_SANDCASTLE, IS_WINDOWS, TestCase, run_tests, skipIfRocm, slowTest,
parametrize, subtest, instantiate_parametrized_tests, dtype_name, TEST_WITH_ROCM)
from torch.testing._internal.common_device_type import \
(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, dtypes,
get_device_type_test_bases, instantiate_device_type_tests, onlyCUDA, onlyNativeDeviceTypes,
deviceCountAtLeast, ops, expectedFailureMeta, OpDTypes)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal import opinfo
from torch.testing._internal.common_dtype import all_types_and_complex_and, floating_types
from torch.testing._internal.common_modules import modules, module_db, ModuleInfo
from torch.testing._internal.opinfo.core import SampleInput, DecorateInfo, OpInfo
|
import collections
import doctest
import functools
import importlib
import inspect
import itertools
import math
import os
import re
import subprocess
import sys
import unittest.mock
from typing import Any, Callable, Iterator, List, Tuple
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import \
(IS_FBCODE, IS_JETSON, IS_MACOS, IS_SANDCASTLE, IS_WINDOWS, TestCase, run_tests, slowTest,
parametrize, subtest, instantiate_parametrized_tests, dtype_name, TEST_WITH_ROCM, decorateIf)
from torch.testing._internal.common_device_type import \
(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, dtypes,
get_device_type_test_bases, instantiate_device_type_tests, onlyCPU, onlyCUDA, onlyNativeDeviceTypes,
deviceCountAtLeast, ops, expectedFailureMeta, OpDTypes)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal import opinfo
from torch.testing._internal.common_dtype import all_types_and_complex_and, floating_types
from torch.testing._internal.common_modules import modules, module_db, ModuleInfo
from torch.testing._internal.opinfo.core import SampleInput, DecorateInfo, OpInfo
import operator
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_testing.py
|
_get_test_funcs_for_test_class
|
def _get_test_funcs_for_test_class(test_cls):
""" Convenience function to get all (test function, parametrized_name) pairs for a given test class. """
test_funcs = [(getattr(test_cls, key), key) for key in test_cls.__dict__ if key.startswith('test_')]
return test_funcs
class TestTestParametrization(TestCase):
def test_default_names(self):
class TestParametrized(TestCase):
@parametrize("x", range(5))
def test_default_names(self, x):
pass
@parametrize("x,y", [(1, 2), (2, 3), (3, 4)])
def test_two_things_default_names(self, x, y):
pass
instantiate_parametrized_tests(TestParametrized)
expected_test_names = [
'TestParametrized.test_default_names_x_0',
'TestParametrized.test_default_names_x_1',
'TestParametrized.test_default_names_x_2',
'TestParametrized.test_default_names_x_3',
'TestParametrized.test_default_names_x_4',
'TestParametrized.test_two_things_default_names_x_1_y_2',
'TestParametrized.test_two_things_default_names_x_2_y_3',
'TestParametrized.test_two_things_default_names_x_3_y_4',
]
test_names = _get_test_names_for_test_class(TestParametrized)
self.assertEqual(expected_test_names, test_names)
def test_name_fn(self):
class TestParametrized(TestCase):
@parametrize("bias", [False, True], name_fn=lambda b: 'bias' if b else 'no_bias')
def test_custom_names(self, bias):
pass
@parametrize("x", [1, 2], name_fn=str)
@parametrize("y", [3, 4], name_fn=str)
@parametrize("z", [5, 6], name_fn=str)
def test_three_things_composition_custom_names(self, x, y, z):
pass
@parametrize("x,y", [(1, 2), (1, 3), (1, 4)], name_fn=lambda x, y: '{}__{}'.format(x, y))
def test_two_things_custom_names_alternate(self, x, y):
pass
instantiate_parametrized_tests(TestParametrized)
expected_test_names = [
'TestParametrized.test_custom_names_bias',
'TestParametrized.test_custom_names_no_bias',
'TestParametrized.test_three_things_composition_custom_names_1_3_5',
'TestParametrized.test_three_things_composition_custom_names_1_3_6',
'TestParametrized.test_three_things_composition_custom_names_1_4_5',
'TestParametrized.test_three_things_composition_custom_names_1_4_6',
'TestParametrized.test_three_things_composition_custom_names_2_3_5',
'TestParametrized.test_three_things_composition_custom_names_2_3_6',
'TestParametrized.test_three_things_composition_custom_names_2_4_5',
'TestParametrized.test_three_things_composition_custom_names_2_4_6',
'TestParametrized.test_two_things_custom_names_alternate_1__2',
'TestParametrized.test_two_things_custom_names_alternate_1__3',
'TestParametrized.test_two_things_custom_names_alternate_1__4',
]
test_names = _get_test_names_for_test_class(TestParametrized)
self.assertEqual(expected_test_names, test_names)
def test_subtest_names(self):
class TestParametrized(TestCase):
@parametrize("bias", [subtest(True, name='bias'),
subtest(False, name='no_bias')])
def test_custom_names(self, bias):
pass
@parametrize("x,y", [subtest((1, 2), name='double'),
subtest((1, 3), name='triple'),
subtest((1, 4), name='quadruple')])
def test_two_things_custom_names(self, x, y):
pass
instantiate_parametrized_tests(TestParametrized)
expected_test_names = [
'TestParametrized.test_custom_names_bias',
'TestParametrized.test_custom_names_no_bias',
'TestParametrized.test_two_things_custom_names_double',
'TestParametrized.test_two_things_custom_names_quadruple',
'TestParametrized.test_two_things_custom_names_triple',
]
test_names = _get_test_names_for_test_class(TestParametrized)
self.assertEqual(expected_test_names, test_names)
def test_apply_param_specific_decorators(self):
# Test that decorators can be applied on a per-param basis.
def test_dec(func):
func._decorator_applied = True
return func
class TestParametrized(TestCase):
@parametrize("x", [subtest(1, name='one'),
subtest(2, name='two', decorators=[test_dec]),
subtest(3, name='three')])
def test_param(self, x):
pass
instantiate_parametrized_tests(TestParametrized)
for test_func, name in _get_test_funcs_for_test_class(TestParametrized):
self.assertEqual(hasattr(test_func, '_decorator_applied'), name == 'test_param_two')
def test_compose_param_specific_decorators(self):
# Test that multiple per-param decorators compose correctly.
def test_dec(func):
func._decorator_applied = True
return func
class TestParametrized(TestCase):
@parametrize("x", [subtest(1),
subtest(2, decorators=[test_dec]),
subtest(3)])
@parametrize("y", [subtest(False, decorators=[test_dec]),
subtest(True)])
def test_param(self, x, y):
pass
instantiate_parametrized_tests(TestParametrized)
for test_func, name in _get_test_funcs_for_test_class(TestParametrized):
# Decorator should be applied whenever either x == 2 or y == False.
should_apply = ('x_2' in name) or ('y_False' in name)
self.assertEqual(hasattr(test_func, '_decorator_applied'), should_apply)
def test_modules_decorator_misuse_error(self):
# Test that @modules errors out when used with instantiate_parametrized_tests().
class TestParametrized(TestCase):
@modules(module_db)
def test_modules(self, module_info):
pass
with self.assertRaisesRegex(RuntimeError, 'intended to be used in a device-specific context'):
instantiate_parametrized_tests(TestParametrized)
def test_ops_decorator_misuse_error(self):
# Test that @ops errors out when used with instantiate_parametrized_tests().
class TestParametrized(TestCase):
@ops(op_db)
def test_ops(self, module_info):
pass
with self.assertRaisesRegex(RuntimeError, 'intended to be used in a device-specific context'):
instantiate_parametrized_tests(TestParametrized)
def test_multiple_handling_of_same_param_error(self):
# Test that multiple decorators handling the same param errors out.
class TestParametrized(TestCase):
@parametrize("x", range(3))
@parametrize("x", range(5))
def test_param(self, x):
pass
with self.assertRaisesRegex(RuntimeError, 'multiple parametrization decorators'):
instantiate_parametrized_tests(TestParametrized)
@parametrize("x", [1, subtest(2, decorators=[unittest.expectedFailure]), 3])
def test_subtest_expected_failure(self, x):
if x == 2:
raise RuntimeError('Boom')
@parametrize("x", [subtest(1, decorators=[unittest.expectedFailure]), 2, 3])
@parametrize("y", [4, 5, subtest(6, decorators=[unittest.expectedFailure])])
def test_two_things_subtest_expected_failure(self, x, y):
if x == 1 or y == 6:
raise RuntimeError('Boom')
class TestTestParametrizationDeviceType(TestCase):
def test_unparametrized_names(self, device):
# This test exists to protect against regressions in device / dtype test naming
# due to parametrization logic.
device = self.device_type
class TestParametrized(TestCase):
def test_device_specific(self, device):
pass
@dtypes(torch.float32, torch.float64)
def test_device_dtype_specific(self, device, dtype):
pass
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()['TestParametrized{}'.format(device.upper())]
expected_test_names = [name.format(device_cls.__name__, device) for name in (
'{}.test_device_dtype_specific_{}_float32',
'{}.test_device_dtype_specific_{}_float64',
'{}.test_device_specific_{}')
]
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(expected_test_names, test_names)
def test_empty_param_names(self, device):
# If no param names are passed, ensure things still work without parametrization.
device = self.device_type
class TestParametrized(TestCase):
@parametrize("", [])
def test_foo(self, device):
pass
@parametrize("", range(5))
def test_bar(self, device):
pass
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()['TestParametrized{}'.format(device.upper())]
expected_test_names = [name.format(device_cls.__name__, device) for name in (
'{}.test_bar_{}',
'{}.test_foo_{}')
]
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(expected_test_names, test_names)
def test_empty_param_list(self, device):
# If no param values are passed, ensure a helpful error message is thrown.
# In the wild, this could indicate reuse of an exhausted generator.
device = self.device_type
generator = (a for a in range(5))
class TestParametrized(TestCase):
@parametrize("x", generator)
def test_foo(self, device, x):
pass
# Reuse generator from first test function.
@parametrize("y", generator)
def test_bar(self, device, y):
pass
with self.assertRaisesRegex(ValueError, 'An empty arg_values was passed'):
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
def test_default_names(self, device):
device = self.device_type
class TestParametrized(TestCase):
@parametrize("x", range(5))
def test_default_names(self, device, x):
pass
@parametrize("x,y", [(1, 2), (2, 3), (3, 4)])
def test_two_things_default_names(self, device, x, y):
pass
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()['TestParametrized{}'.format(device.upper())]
expected_test_names = [name.format(device_cls.__name__, device) for name in (
'{}.test_default_names_x_0_{}',
'{}.test_default_names_x_1_{}',
'{}.test_default_names_x_2_{}',
'{}.test_default_names_x_3_{}',
'{}.test_default_names_x_4_{}',
'{}.test_two_things_default_names_x_1_y_2_{}',
'{}.test_two_things_default_names_x_2_y_3_{}',
'{}.test_two_things_default_names_x_3_y_4_{}')
]
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(expected_test_names, test_names)
def test_name_fn(self, device):
device = self.device_type
class TestParametrized(TestCase):
@parametrize("bias", [False, True], name_fn=lambda b: 'bias' if b else 'no_bias')
def test_custom_names(self, device, bias):
pass
@parametrize("x", [1, 2], name_fn=str)
@parametrize("y", [3, 4], name_fn=str)
@parametrize("z", [5, 6], name_fn=str)
def test_three_things_composition_custom_names(self, device, x, y, z):
pass
@parametrize("x,y", [(1, 2), (1, 3), (1, 4)], name_fn=lambda x, y: '{}__{}'.format(x, y))
def test_two_things_custom_names_alternate(self, device, x, y):
pass
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()['TestParametrized{}'.format(device.upper())]
expected_test_names = [name.format(device_cls.__name__, device) for name in (
'{}.test_custom_names_bias_{}',
'{}.test_custom_names_no_bias_{}',
'{}.test_three_things_composition_custom_names_1_3_5_{}',
'{}.test_three_things_composition_custom_names_1_3_6_{}',
'{}.test_three_things_composition_custom_names_1_4_5_{}',
'{}.test_three_things_composition_custom_names_1_4_6_{}',
'{}.test_three_things_composition_custom_names_2_3_5_{}',
'{}.test_three_things_composition_custom_names_2_3_6_{}',
'{}.test_three_things_composition_custom_names_2_4_5_{}',
'{}.test_three_things_composition_custom_names_2_4_6_{}',
'{}.test_two_things_custom_names_alternate_1__2_{}',
'{}.test_two_things_custom_names_alternate_1__3_{}',
'{}.test_two_things_custom_names_alternate_1__4_{}')
]
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(expected_test_names, test_names)
def test_subtest_names(self, device):
device = self.device_type
class TestParametrized(TestCase):
@parametrize("bias", [subtest(True, name='bias'),
subtest(False, name='no_bias')])
def test_custom_names(self, device, bias):
pass
@parametrize("x,y", [subtest((1, 2), name='double'),
subtest((1, 3), name='triple'),
subtest((1, 4), name='quadruple')])
def test_two_things_custom_names(self, device, x, y):
pass
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()['TestParametrized{}'.format(device.upper())]
expected_test_names = [name.format(device_cls.__name__, device) for name in (
'{}.test_custom_names_bias_{}',
'{}.test_custom_names_no_bias_{}',
'{}.test_two_things_custom_names_double_{}',
'{}.test_two_things_custom_names_quadruple_{}',
'{}.test_two_things_custom_names_triple_{}')
]
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(expected_test_names, test_names)
def test_ops_composition_names(self, device):
device = self.device_type
class TestParametrized(TestCase):
@ops(op_db)
@parametrize("flag", [False, True], lambda f: 'flag_enabled' if f else 'flag_disabled')
def test_op_parametrized(self, device, dtype, op, flag):
pass
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()['TestParametrized{}'.format(device.upper())]
expected_test_names = []
for op in op_db:
for dtype in op.supported_dtypes(torch.device(device).type):
for flag_part in ('flag_disabled', 'flag_enabled'):
expected_name = '{}.test_op_parametrized_{}_{}_{}_{}'.format(
device_cls.__name__, op.formatted_name, flag_part, device, dtype_name(dtype))
expected_test_names.append(expected_name)
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(sorted(expected_test_names), sorted(test_names))
def test_modules_composition_names(self, device):
device = self.device_type
class TestParametrized(TestCase):
@modules(module_db)
@parametrize("flag", [False, True], lambda f: 'flag_enabled' if f else 'flag_disabled')
def test_module_parametrized(self, device, dtype, module_info, training, flag):
pass
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()['TestParametrized{}'.format(device.upper())]
expected_test_names = []
for module_info in module_db:
for dtype in module_info.dtypes:
for flag_part in ('flag_disabled', 'flag_enabled'):
expected_train_modes = (
['train_mode', 'eval_mode'] if module_info.train_and_eval_differ else [''])
for training_part in expected_train_modes:
expected_name = '{}.test_module_parametrized_{}{}_{}_{}_{}'.format(
device_cls.__name__, module_info.formatted_name,
'_' + training_part if len(training_part) > 0 else '',
flag_part, device, dtype_name(dtype))
expected_test_names.append(expected_name)
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(sorted(expected_test_names), sorted(test_names))
def test_ops_decorator_applies_op_and_param_specific_decorators(self, device):
# Test that decorators can be applied on a per-op / per-param basis.
# Create a test op, OpInfo entry, and decorator to apply.
def test_op(x):
return -x
def test_dec(func):
func._decorator_applied = True
return func
test_op_info = OpInfo(
'test_op',
op=test_op,
dtypes=floating_types(),
sample_inputs_func=lambda _: [],
decorators=[
DecorateInfo(test_dec, 'TestParametrized', 'test_op_param',
device_type='cpu', dtypes=[torch.float64],
active_if=lambda p: p['x'] == 2)
])
class TestParametrized(TestCase):
@ops(op_db + [test_op_info])
@parametrize("x", [2, 3])
def test_op_param(self, device, dtype, op, x):
pass
@ops(op_db + [test_op_info])
@parametrize("y", [
subtest(4),
subtest(5, decorators=[test_dec])])
def test_other(self, device, dtype, op, y):
pass
device = self.device_type
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()['TestParametrized{}'.format(device.upper())]
for test_func, name in _get_test_funcs_for_test_class(device_cls):
should_apply = (name == 'test_op_param_test_op_x_2_cpu_float64' or
('test_other' in name and 'y_5' in name))
self.assertEqual(hasattr(test_func, '_decorator_applied'), should_apply)
def test_modules_decorator_applies_module_and_param_specific_decorators(self, device):
# Test that decorators can be applied on a per-module / per-param basis.
# Create a test module, ModuleInfo entry, and decorator to apply.
class TestModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.x = torch.nn.Parameter(torch.randn(3))
def forward(self, y):
return self.x + y
def test_dec(func):
func._decorator_applied = True
return func
test_module_info = ModuleInfo(
TestModule,
module_inputs_func=lambda _: [],
decorators=[
DecorateInfo(test_dec, 'TestParametrized', 'test_module_param',
device_type='cpu', dtypes=[torch.float64],
active_if=lambda p: p['x'] == 2)
])
class TestParametrized(TestCase):
@modules(module_db + [test_module_info])
@parametrize("x", [2, 3])
def test_module_param(self, device, dtype, module_info, training, x):
pass
@modules(module_db + [test_module_info])
@parametrize("y", [
subtest(4),
subtest(5, decorators=[test_dec])])
def test_other(self, device, dtype, module_info, training, y):
pass
device = self.device_type
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()['TestParametrized{}'.format(device.upper())]
for test_func, name in _get_test_funcs_for_test_class(device_cls):
should_apply = (name == 'test_module_param_TestModule_x_2_cpu_float64' or
('test_other' in name and 'y_5' in name))
self.assertEqual(hasattr(test_func, '_decorator_applied'), should_apply)
def test_dtypes_composition_valid(self, device):
# Test checks that @parametrize and @dtypes compose as expected when @parametrize
# doesn't set dtype.
device = self.device_type
class TestParametrized(TestCase):
@dtypes(torch.float32, torch.float64)
@parametrize("x", range(3))
def test_parametrized(self, x, dtype):
pass
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()['TestParametrized{}'.format(device.upper())]
expected_test_names = [name.format(device_cls.__name__, device) for name in (
'{}.test_parametrized_x_0_{}_float32',
'{}.test_parametrized_x_0_{}_float64',
'{}.test_parametrized_x_1_{}_float32',
'{}.test_parametrized_x_1_{}_float64',
'{}.test_parametrized_x_2_{}_float32',
'{}.test_parametrized_x_2_{}_float64')
]
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(sorted(expected_test_names), sorted(test_names))
def test_dtypes_composition_invalid(self, device):
# Test checks that @dtypes cannot be composed with parametrization decorators when they
# also try to set dtype.
device = self.device_type
class TestParametrized(TestCase):
@dtypes(torch.float32, torch.float64)
@parametrize("dtype", [torch.int32, torch.int64])
def test_parametrized(self, dtype):
pass
with self.assertRaisesRegex(RuntimeError, "handled multiple times"):
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
# Verify proper error behavior with @ops + @dtypes, as both try to set dtype.
class TestParametrized(TestCase):
@dtypes(torch.float32, torch.float64)
@ops(op_db)
def test_parametrized(self, op, dtype):
pass
with self.assertRaisesRegex(RuntimeError, "handled multiple times"):
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
def test_multiple_handling_of_same_param_error(self, device):
# Test that multiple decorators handling the same param errors out.
# Both @modules and @ops handle the dtype param.
class TestParametrized(TestCase):
@ops(op_db)
@modules(module_db)
def test_param(self, device, dtype, op, module_info, training):
pass
with self.assertRaisesRegex(RuntimeError, "handled multiple times"):
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
@parametrize("x", [1, subtest(2, decorators=[unittest.expectedFailure]), 3])
def test_subtest_expected_failure(self, device, x):
if x == 2:
raise RuntimeError('Boom')
@parametrize("x", [subtest(1, decorators=[unittest.expectedFailure]), 2, 3])
@parametrize("y", [4, 5, subtest(6, decorators=[unittest.expectedFailure])])
def test_two_things_subtest_expected_failure(self, device, x, y):
if x == 1 or y == 6:
raise RuntimeError('Boom')
instantiate_parametrized_tests(TestTestParametrization)
instantiate_device_type_tests(TestTestParametrizationDeviceType, globals())
class TestImports(TestCase):
def test_circular_dependencies(self) -> None:
""" Checks that all modules inside torch can be imported
Prevents regression reported in https://github.com/pytorch/pytorch/issues/77441 """
ignored_modules = ["torch.utils.tensorboard", # deps on tensorboard
"torch.distributed.elastic.rendezvous", # depps on etcd
"torch.backends._coreml", # depends on pycoreml
"torch.contrib.", # something weird
"torch.testing._internal.distributed.", # just fails
"torch.ao.pruning._experimental.", # depends on pytorch_lightning, not user-facing
"torch.onnx._internal.fx", # depends on onnx-script
]
# See https://github.com/pytorch/pytorch/issues/77801
if not sys.version_info >= (3, 9):
ignored_modules.append("torch.utils.benchmark")
if IS_WINDOWS or IS_MACOS:
# Distributed should be importable on Windows(except nn.api.), but not on Mac
if IS_MACOS:
ignored_modules.append("torch.distributed.")
else:
ignored_modules.append("torch.distributed.nn.api.")
ignored_modules.append("torch.distributed.optim.")
ignored_modules.append("torch.distributed.pipeline.")
ignored_modules.append("torch.distributed.rpc.")
ignored_modules.append("torch.testing._internal.dist_utils")
# And these both end up with transitive dependencies on distributed
ignored_modules.append("torch.nn.parallel._replicated_tensor_ddp_interop")
ignored_modules.append("torch.testing._internal.common_fsdp")
ignored_modules.append("torch.testing._internal.common_distributed")
torch_dir = os.path.dirname(torch.__file__)
for base, folders, files in os.walk(torch_dir):
prefix = os.path.relpath(base, os.path.dirname(torch_dir)).replace(os.path.sep, ".")
for f in files:
if not f.endswith(".py"):
continue
mod_name = f"{prefix}.{f[:-3]}" if f != "__init__.py" else prefix
# Do not attempt to import executable modules
if f == "__main__.py":
continue
if any(mod_name.startswith(x) for x in ignored_modules):
continue
try:
mod = importlib.import_module(mod_name)
except Exception as e:
raise RuntimeError(f"Failed to import {mod_name}: {e}") from e
self.assertTrue(inspect.ismodule(mod))
@unittest.skipIf(IS_WINDOWS, "importing torch+CUDA on CPU results in warning")
def test_no_warning_on_import(self) -> None:
out = subprocess.check_output(
[sys.executable, "-W", "all", "-c", "import torch"],
stderr=subprocess.STDOUT,
# On Windows, opening the subprocess with the default CWD makes `import torch`
# fail, so just set CWD to this script's directory
cwd=os.path.dirname(os.path.realpath(__file__)),).decode("utf-8")
self.assertEqual(out, "")
@unittest.skipIf(IS_WINDOWS, "importing torch+CUDA on CPU results in warning")
@parametrize('path', ['torch', 'functorch'])
def test_no_mutate_global_logging_on_import(self, path) -> None:
# Calling logging.basicConfig, among other things, modifies the global
# logging state. It is not OK to modify the global logging state on
# `import torch` (or other submodules we own) because users do not expect it.
expected = 'abcdefghijklmnopqrstuvwxyz'
commands = [
'import logging',
f'import {path}',
'_logger = logging.getLogger("torch_test_testing")',
'logging.root.addHandler(logging.StreamHandler())',
'logging.root.setLevel(logging.INFO)',
f'_logger.info("{expected}")'
]
out = subprocess.check_output(
[sys.executable, "-W", "all", "-c", "; ".join(commands)],
stderr=subprocess.STDOUT,
).decode("utf-8")
self.assertEqual(out.strip(), expected)
class TestOpInfos(TestCase):
def test_sample_input(self) -> None:
a, b, c, d, e = [object() for _ in range(5)]
# Construction with natural syntax
s = SampleInput(a, b, c, d=d, e=e)
assert s.input is a
assert s.args == (b, c)
assert s.kwargs == dict(d=d, e=e)
# Construction with explicit args and kwargs
s = SampleInput(a, args=(b,), kwargs=dict(c=c, d=d, e=e))
assert s.input is a
assert s.args == (b,)
assert s.kwargs == dict(c=c, d=d, e=e)
# Construction with a mixed form will error
with self.assertRaises(AssertionError):
s = SampleInput(a, b, c, args=(d, e))
with self.assertRaises(AssertionError):
s = SampleInput(a, b, c, kwargs=dict(d=d, e=e))
with self.assertRaises(AssertionError):
s = SampleInput(a, args=(b, c), d=d, e=e)
with self.assertRaises(AssertionError):
s = SampleInput(a, b, c=c, kwargs=dict(d=d, e=e))
# Mixing metadata into "natural" construction will error
with self.assertRaises(AssertionError):
s = SampleInput(a, b, name="foo")
with self.assertRaises(AssertionError):
s = SampleInput(a, b, output_process_fn_grad=lambda x: x)
with self.assertRaises(AssertionError):
s = SampleInput(a, b, broadcasts_input=True)
# But when only input is given, metadata is allowed for backward
# compatibility
s = SampleInput(a, broadcasts_input=True)
assert s.input is a
assert s.broadcasts_input
def test_sample_input_metadata(self) -> None:
a, b = [object() for _ in range(2)]
s1 = SampleInput(a, b=b)
self.assertIs(s1.output_process_fn_grad(None), None)
self.assertFalse(s1.broadcasts_input)
self.assertEqual(s1.name, "")
s2 = s1.with_metadata(
output_process_fn_grad=lambda x: a,
broadcasts_input=True,
name="foo",
)
self.assertIs(s1, s2)
self.assertIs(s2.output_process_fn_grad(None), a)
self.assertTrue(s2.broadcasts_input)
self.assertEqual(s2.name, "foo")
# Tests that validate the various sample generating functions on each OpInfo.
class TestOpInfoSampleFunctions(TestCase):
@ops(op_db, dtypes=OpDTypes.any_one)
def test_opinfo_sample_generators(self, device, dtype, op):
# Test op.sample_inputs doesn't generate multiple samples when called
samples = op.sample_inputs(device, dtype)
self.assertIsInstance(samples, Generator)
@ops([op for op in op_db if op.reference_inputs_func is not None], dtypes=OpDTypes.any_one)
def test_opinfo_reference_generators(self, device, dtype, op):
# Test op.reference_inputs doesn't generate multiple samples when called
samples = op.reference_inputs(device, dtype)
self.assertIsInstance(samples, Generator)
@ops([op for op in op_db if op.error_inputs_func is not None], dtypes=OpDTypes.none)
def test_opinfo_error_generators(self, device, op):
# Test op.error_inputs doesn't generate multiple inputs when called
samples = op.error_inputs(device)
self.assertIsInstance(samples, Generator)
instantiate_device_type_tests(TestOpInfoSampleFunctions, globals())
instantiate_parametrized_tests(TestImports)
if __name__ == '__main__':
run_tests()
|
def _get_test_funcs_for_test_class(test_cls):
""" Convenience function to get all (test function, parametrized_name) pairs for a given test class. """
test_funcs = [(getattr(test_cls, key), key) for key in test_cls.__dict__ if key.startswith('test_')]
return test_funcs
class TestTestParametrization(TestCase):
def test_default_names(self):
class TestParametrized(TestCase):
@parametrize("x", range(5))
def test_default_names(self, x):
pass
@parametrize("x,y", [(1, 2), (2, 3), (3, 4)])
def test_two_things_default_names(self, x, y):
pass
instantiate_parametrized_tests(TestParametrized)
expected_test_names = [
'TestParametrized.test_default_names_x_0',
'TestParametrized.test_default_names_x_1',
'TestParametrized.test_default_names_x_2',
'TestParametrized.test_default_names_x_3',
'TestParametrized.test_default_names_x_4',
'TestParametrized.test_two_things_default_names_x_1_y_2',
'TestParametrized.test_two_things_default_names_x_2_y_3',
'TestParametrized.test_two_things_default_names_x_3_y_4',
]
test_names = _get_test_names_for_test_class(TestParametrized)
self.assertEqual(expected_test_names, test_names)
def test_name_fn(self):
class TestParametrized(TestCase):
@parametrize("bias", [False, True], name_fn=lambda b: 'bias' if b else 'no_bias')
def test_custom_names(self, bias):
pass
@parametrize("x", [1, 2], name_fn=str)
@parametrize("y", [3, 4], name_fn=str)
@parametrize("z", [5, 6], name_fn=str)
def test_three_things_composition_custom_names(self, x, y, z):
pass
@parametrize("x,y", [(1, 2), (1, 3), (1, 4)], name_fn=lambda x, y: f'{x}__{y}')
def test_two_things_custom_names_alternate(self, x, y):
pass
instantiate_parametrized_tests(TestParametrized)
expected_test_names = [
'TestParametrized.test_custom_names_bias',
'TestParametrized.test_custom_names_no_bias',
'TestParametrized.test_three_things_composition_custom_names_1_3_5',
'TestParametrized.test_three_things_composition_custom_names_1_3_6',
'TestParametrized.test_three_things_composition_custom_names_1_4_5',
'TestParametrized.test_three_things_composition_custom_names_1_4_6',
'TestParametrized.test_three_things_composition_custom_names_2_3_5',
'TestParametrized.test_three_things_composition_custom_names_2_3_6',
'TestParametrized.test_three_things_composition_custom_names_2_4_5',
'TestParametrized.test_three_things_composition_custom_names_2_4_6',
'TestParametrized.test_two_things_custom_names_alternate_1__2',
'TestParametrized.test_two_things_custom_names_alternate_1__3',
'TestParametrized.test_two_things_custom_names_alternate_1__4',
]
test_names = _get_test_names_for_test_class(TestParametrized)
self.assertEqual(expected_test_names, test_names)
def test_subtest_names(self):
class TestParametrized(TestCase):
@parametrize("bias", [subtest(True, name='bias'),
subtest(False, name='no_bias')])
def test_custom_names(self, bias):
pass
@parametrize("x,y", [subtest((1, 2), name='double'),
subtest((1, 3), name='triple'),
subtest((1, 4), name='quadruple')])
def test_two_things_custom_names(self, x, y):
pass
instantiate_parametrized_tests(TestParametrized)
expected_test_names = [
'TestParametrized.test_custom_names_bias',
'TestParametrized.test_custom_names_no_bias',
'TestParametrized.test_two_things_custom_names_double',
'TestParametrized.test_two_things_custom_names_quadruple',
'TestParametrized.test_two_things_custom_names_triple',
]
test_names = _get_test_names_for_test_class(TestParametrized)
self.assertEqual(expected_test_names, test_names)
def test_apply_param_specific_decorators(self):
# Test that decorators can be applied on a per-param basis.
def test_dec(func):
func._decorator_applied = True
return func
class TestParametrized(TestCase):
@parametrize("x", [subtest(1, name='one'),
subtest(2, name='two', decorators=[test_dec]),
subtest(3, name='three')])
def test_param(self, x):
pass
instantiate_parametrized_tests(TestParametrized)
for test_func, name in _get_test_funcs_for_test_class(TestParametrized):
self.assertEqual(hasattr(test_func, '_decorator_applied'), name == 'test_param_two')
def test_compose_param_specific_decorators(self):
# Test that multiple per-param decorators compose correctly.
def test_dec(func):
func._decorator_applied = True
return func
class TestParametrized(TestCase):
@parametrize("x", [subtest(1),
subtest(2, decorators=[test_dec]),
subtest(3)])
@parametrize("y", [subtest(False, decorators=[test_dec]),
subtest(True)])
def test_param(self, x, y):
pass
instantiate_parametrized_tests(TestParametrized)
for test_func, name in _get_test_funcs_for_test_class(TestParametrized):
# Decorator should be applied whenever either x == 2 or y == False.
should_apply = ('x_2' in name) or ('y_False' in name)
self.assertEqual(hasattr(test_func, '_decorator_applied'), should_apply)
def test_modules_decorator_misuse_error(self):
# Test that @modules errors out when used with instantiate_parametrized_tests().
class TestParametrized(TestCase):
@modules(module_db)
def test_modules(self, module_info):
pass
with self.assertRaisesRegex(RuntimeError, 'intended to be used in a device-specific context'):
instantiate_parametrized_tests(TestParametrized)
def test_ops_decorator_misuse_error(self):
# Test that @ops errors out when used with instantiate_parametrized_tests().
class TestParametrized(TestCase):
@ops(op_db)
def test_ops(self, module_info):
pass
with self.assertRaisesRegex(RuntimeError, 'intended to be used in a device-specific context'):
instantiate_parametrized_tests(TestParametrized)
def test_multiple_handling_of_same_param_error(self):
# Test that multiple decorators handling the same param errors out.
class TestParametrized(TestCase):
@parametrize("x", range(3))
@parametrize("x", range(5))
def test_param(self, x):
pass
with self.assertRaisesRegex(RuntimeError, 'multiple parametrization decorators'):
instantiate_parametrized_tests(TestParametrized)
@parametrize("x", [1, subtest(2, decorators=[unittest.expectedFailure]), 3])
def test_subtest_expected_failure(self, x):
if x == 2:
raise RuntimeError('Boom')
@parametrize("x", [subtest(1, decorators=[unittest.expectedFailure]), 2, 3])
@parametrize("y", [4, 5, subtest(6, decorators=[unittest.expectedFailure])])
def test_two_things_subtest_expected_failure(self, x, y):
if x == 1 or y == 6:
raise RuntimeError('Boom')
class TestTestParametrizationDeviceType(TestCase):
def test_unparametrized_names(self, device):
# This test exists to protect against regressions in device / dtype test naming
# due to parametrization logic.
device = self.device_type
class TestParametrized(TestCase):
def test_device_specific(self, device):
pass
@dtypes(torch.float32, torch.float64)
def test_device_dtype_specific(self, device, dtype):
pass
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()[f'TestParametrized{device.upper()}']
expected_test_names = [name.format(device_cls.__name__, device) for name in (
'{}.test_device_dtype_specific_{}_float32',
'{}.test_device_dtype_specific_{}_float64',
'{}.test_device_specific_{}')
]
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(expected_test_names, test_names)
def test_empty_param_names(self, device):
# If no param names are passed, ensure things still work without parametrization.
device = self.device_type
class TestParametrized(TestCase):
@parametrize("", [])
def test_foo(self, device):
pass
@parametrize("", range(5))
def test_bar(self, device):
pass
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()[f'TestParametrized{device.upper()}']
expected_test_names = [name.format(device_cls.__name__, device) for name in (
'{}.test_bar_{}',
'{}.test_foo_{}')
]
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(expected_test_names, test_names)
def test_empty_param_list(self, device):
# If no param values are passed, ensure a helpful error message is thrown.
# In the wild, this could indicate reuse of an exhausted generator.
device = self.device_type
generator = (a for a in range(5))
class TestParametrized(TestCase):
@parametrize("x", generator)
def test_foo(self, device, x):
pass
# Reuse generator from first test function.
@parametrize("y", generator)
def test_bar(self, device, y):
pass
with self.assertRaisesRegex(ValueError, 'An empty arg_values was passed'):
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
def test_default_names(self, device):
device = self.device_type
class TestParametrized(TestCase):
@parametrize("x", range(5))
def test_default_names(self, device, x):
pass
@parametrize("x,y", [(1, 2), (2, 3), (3, 4)])
def test_two_things_default_names(self, device, x, y):
pass
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()[f'TestParametrized{device.upper()}']
expected_test_names = [name.format(device_cls.__name__, device) for name in (
'{}.test_default_names_x_0_{}',
'{}.test_default_names_x_1_{}',
'{}.test_default_names_x_2_{}',
'{}.test_default_names_x_3_{}',
'{}.test_default_names_x_4_{}',
'{}.test_two_things_default_names_x_1_y_2_{}',
'{}.test_two_things_default_names_x_2_y_3_{}',
'{}.test_two_things_default_names_x_3_y_4_{}')
]
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(expected_test_names, test_names)
def test_default_name_non_primitive(self, device):
device = self.device_type
class TestParametrized(TestCase):
@parametrize("x", [1, .5, "foo", object()])
def test_default_names(self, device, x):
pass
@parametrize("x,y", [(1, object()), (object(), .5), (object(), object())])
def test_two_things_default_names(self, device, x, y):
pass
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()[f'TestParametrized{device.upper()}']
expected_test_names = sorted(name.format(device_cls.__name__, device) for name in (
'{}.test_default_names_x_1_{}',
'{}.test_default_names_x_0_5_{}',
'{}.test_default_names_x_foo_{}',
'{}.test_default_names_x3_{}',
'{}.test_two_things_default_names_x_1_y0_{}',
'{}.test_two_things_default_names_x1_y_0_5_{}',
'{}.test_two_things_default_names_x2_y2_{}')
)
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(expected_test_names, test_names)
def test_name_fn(self, device):
device = self.device_type
class TestParametrized(TestCase):
@parametrize("bias", [False, True], name_fn=lambda b: 'bias' if b else 'no_bias')
def test_custom_names(self, device, bias):
pass
@parametrize("x", [1, 2], name_fn=str)
@parametrize("y", [3, 4], name_fn=str)
@parametrize("z", [5, 6], name_fn=str)
def test_three_things_composition_custom_names(self, device, x, y, z):
pass
@parametrize("x,y", [(1, 2), (1, 3), (1, 4)], name_fn=lambda x, y: f'{x}__{y}')
def test_two_things_custom_names_alternate(self, device, x, y):
pass
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()[f'TestParametrized{device.upper()}']
expected_test_names = [name.format(device_cls.__name__, device) for name in (
'{}.test_custom_names_bias_{}',
'{}.test_custom_names_no_bias_{}',
'{}.test_three_things_composition_custom_names_1_3_5_{}',
'{}.test_three_things_composition_custom_names_1_3_6_{}',
'{}.test_three_things_composition_custom_names_1_4_5_{}',
'{}.test_three_things_composition_custom_names_1_4_6_{}',
'{}.test_three_things_composition_custom_names_2_3_5_{}',
'{}.test_three_things_composition_custom_names_2_3_6_{}',
'{}.test_three_things_composition_custom_names_2_4_5_{}',
'{}.test_three_things_composition_custom_names_2_4_6_{}',
'{}.test_two_things_custom_names_alternate_1__2_{}',
'{}.test_two_things_custom_names_alternate_1__3_{}',
'{}.test_two_things_custom_names_alternate_1__4_{}')
]
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(expected_test_names, test_names)
def test_subtest_names(self, device):
device = self.device_type
class TestParametrized(TestCase):
@parametrize("bias", [subtest(True, name='bias'),
subtest(False, name='no_bias')])
def test_custom_names(self, device, bias):
pass
@parametrize("x,y", [subtest((1, 2), name='double'),
subtest((1, 3), name='triple'),
subtest((1, 4), name='quadruple')])
def test_two_things_custom_names(self, device, x, y):
pass
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()[f'TestParametrized{device.upper()}']
expected_test_names = [name.format(device_cls.__name__, device) for name in (
'{}.test_custom_names_bias_{}',
'{}.test_custom_names_no_bias_{}',
'{}.test_two_things_custom_names_double_{}',
'{}.test_two_things_custom_names_quadruple_{}',
'{}.test_two_things_custom_names_triple_{}')
]
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(expected_test_names, test_names)
def test_ops_composition_names(self, device):
device = self.device_type
class TestParametrized(TestCase):
@ops(op_db)
@parametrize("flag", [False, True], lambda f: 'flag_enabled' if f else 'flag_disabled')
def test_op_parametrized(self, device, dtype, op, flag):
pass
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()[f'TestParametrized{device.upper()}']
expected_test_names = []
for op in op_db:
for dtype in op.supported_dtypes(torch.device(device).type):
for flag_part in ('flag_disabled', 'flag_enabled'):
expected_name = f'{device_cls.__name__}.test_op_parametrized_{op.formatted_name}_{flag_part}_{device}_{dtype_name(dtype)}' # noqa: B950
expected_test_names.append(expected_name)
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(sorted(expected_test_names), sorted(test_names))
def test_modules_composition_names(self, device):
device = self.device_type
class TestParametrized(TestCase):
@modules(module_db)
@parametrize("flag", [False, True], lambda f: 'flag_enabled' if f else 'flag_disabled')
def test_module_parametrized(self, device, dtype, module_info, training, flag):
pass
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()[f'TestParametrized{device.upper()}']
expected_test_names = []
for module_info in module_db:
for dtype in module_info.dtypes:
for flag_part in ('flag_disabled', 'flag_enabled'):
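# Modules whose train/eval behavior differs get separate train_mode / eval_mode variants; others get a single unsuffixed variant.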
expected_train_modes = (
['train_mode', 'eval_mode'] if module_info.train_and_eval_differ else [''])
for training_part in expected_train_modes:
expected_name = '{}.test_module_parametrized_{}{}_{}_{}_{}'.format(
device_cls.__name__, module_info.formatted_name,
'_' + training_part if len(training_part) > 0 else '',
flag_part, device, dtype_name(dtype))
expected_test_names.append(expected_name)
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(sorted(expected_test_names), sorted(test_names))
def test_ops_decorator_applies_op_and_param_specific_decorators(self, device):
# Test that decorators can be applied on a per-op / per-param basis.
# Create a test op, OpInfo entry, and decorator to apply.
def test_op(x):
return -x
def test_dec(func):
func._decorator_applied = True
return func
test_op_info = OpInfo(
'test_op',
op=test_op,
dtypes=floating_types(),
sample_inputs_func=lambda _: [],
decorators=[
DecorateInfo(test_dec, 'TestParametrized', 'test_op_param',
device_type='cpu', dtypes=[torch.float64],
active_if=lambda p: p['x'] == 2)
])
class TestParametrized(TestCase):
@ops(op_db + [test_op_info])
@parametrize("x", [2, 3])
def test_op_param(self, device, dtype, op, x):
pass
@ops(op_db + [test_op_info])
@parametrize("y", [
subtest(4),
subtest(5, decorators=[test_dec])])
def test_other(self, device, dtype, op, y):
pass
@decorateIf(test_dec, lambda p: p['dtype'] == torch.int16)
@ops(op_db)
def test_three(self, device, dtype, op):
pass
device = self.device_type
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()[f'TestParametrized{device.upper()}']
for test_func, name in _get_test_funcs_for_test_class(device_cls):
should_apply = (name == 'test_op_param_test_op_x_2_cpu_float64' or
('test_other' in name and 'y_5' in name) or
('test_three' in name and name.endswith('_int16')))
self.assertEqual(hasattr(test_func, '_decorator_applied'), should_apply)
def test_modules_decorator_applies_module_and_param_specific_decorators(self, device):
# Test that decorators can be applied on a per-module / per-param basis.
# Create a test module, ModuleInfo entry, and decorator to apply.
class TestModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.x = torch.nn.Parameter(torch.randn(3))
def forward(self, y):
return self.x + y
def test_dec(func):
func._decorator_applied = True
return func
test_module_info = ModuleInfo(
TestModule,
module_inputs_func=lambda _: [],
decorators=[
DecorateInfo(test_dec, 'TestParametrized', 'test_module_param',
device_type='cpu', dtypes=[torch.float64],
active_if=lambda p: p['x'] == 2)
])
class TestParametrized(TestCase):
@modules(module_db + [test_module_info])
@parametrize("x", [2, 3])
def test_module_param(self, device, dtype, module_info, training, x):
pass
@modules(module_db + [test_module_info])
@parametrize("y", [
subtest(4),
subtest(5, decorators=[test_dec])])
def test_other(self, device, dtype, module_info, training, y):
pass
@decorateIf(test_dec, lambda p: p['dtype'] == torch.float64)
@modules(module_db)
def test_three(self, device, dtype, module_info):
pass
device = self.device_type
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()[f'TestParametrized{device.upper()}']
for test_func, name in _get_test_funcs_for_test_class(device_cls):
should_apply = (name == 'test_module_param_TestModule_x_2_cpu_float64' or
('test_other' in name and 'y_5' in name) or
('test_three' in name and name.endswith('float64')))
self.assertEqual(hasattr(test_func, '_decorator_applied'), should_apply)
def test_param_specific_decoration(self, device):
def test_dec(func):
func._decorator_applied = True
return func
class TestParametrized(TestCase):
@decorateIf(test_dec, lambda params: params["x"] == 1 and params["y"])
@parametrize("x", range(5))
@parametrize("y", [False, True])
def test_param(self, x, y):
pass
device = self.device_type
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()[f'TestParametrized{device.upper()}']
for test_func, name in _get_test_funcs_for_test_class(device_cls):
should_apply = ('test_param_x_1_y_True' in name)
self.assertEqual(hasattr(test_func, '_decorator_applied'), should_apply)
def test_dtypes_composition_valid(self, device):
# Test checks that @parametrize and @dtypes compose as expected when @parametrize
# doesn't set dtype.
device = self.device_type
class TestParametrized(TestCase):
@dtypes(torch.float32, torch.float64)
@parametrize("x", range(3))
def test_parametrized(self, x, dtype):
pass
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()[f'TestParametrized{device.upper()}']
expected_test_names = [name.format(device_cls.__name__, device) for name in (
'{}.test_parametrized_x_0_{}_float32',
'{}.test_parametrized_x_0_{}_float64',
'{}.test_parametrized_x_1_{}_float32',
'{}.test_parametrized_x_1_{}_float64',
'{}.test_parametrized_x_2_{}_float32',
'{}.test_parametrized_x_2_{}_float64')
]
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(sorted(expected_test_names), sorted(test_names))
def test_dtypes_composition_invalid(self, device):
# Test checks that @dtypes cannot be composed with parametrization decorators when they
# also try to set dtype.
device = self.device_type
class TestParametrized(TestCase):
@dtypes(torch.float32, torch.float64)
@parametrize("dtype", [torch.int32, torch.int64])
def test_parametrized(self, dtype):
pass
with self.assertRaisesRegex(RuntimeError, "handled multiple times"):
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
# Verify proper error behavior with @ops + @dtypes, as both try to set dtype.
class TestParametrized(TestCase):
@dtypes(torch.float32, torch.float64)
@ops(op_db)
def test_parametrized(self, op, dtype):
pass
with self.assertRaisesRegex(RuntimeError, "handled multiple times"):
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
def test_multiple_handling_of_same_param_error(self, device):
# Test that multiple decorators handling the same param errors out.
# Both @modules and @ops handle the dtype param.
class TestParametrized(TestCase):
@ops(op_db)
@modules(module_db)
def test_param(self, device, dtype, op, module_info, training):
pass
with self.assertRaisesRegex(RuntimeError, "handled multiple times"):
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
@parametrize("x", [1, subtest(2, decorators=[unittest.expectedFailure]), 3])
def test_subtest_expected_failure(self, device, x):
if x == 2:
raise RuntimeError('Boom')
@parametrize("x", [subtest(1, decorators=[unittest.expectedFailure]), 2, 3])
@parametrize("y", [4, 5, subtest(6, decorators=[unittest.expectedFailure])])
def test_two_things_subtest_expected_failure(self, device, x, y):
if x == 1 or y == 6:
raise RuntimeError('Boom')
instantiate_parametrized_tests(TestTestParametrization)
instantiate_device_type_tests(TestTestParametrizationDeviceType, globals())
class TestImports(TestCase):
@classmethod
def _check_python_output(cls, program) -> str:
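# Runs `program` in a fresh Python subprocess (with -W always) and returns its combined stdout/stderr, so import-time warnings and log output can be asserted on.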
return subprocess.check_output(
[sys.executable, "-W", "always", "-c", program],
stderr=subprocess.STDOUT,
# On Windows, opening the subprocess with the default CWD makes `import torch`
# fail, so just set CWD to this script's directory
cwd=os.path.dirname(os.path.realpath(__file__)),).decode("utf-8")
def test_circular_dependencies(self) -> None:
""" Checks that all modules inside torch can be imported
Prevents regression reported in https://github.com/pytorch/pytorch/issues/77441 """
ignored_modules = ["torch.utils.tensorboard", # deps on tensorboard
"torch.distributed.elastic.rendezvous", # depps on etcd
"torch.backends._coreml", # depends on pycoreml
"torch.contrib.", # something weird
"torch.testing._internal.distributed.", # just fails
"torch.ao.pruning._experimental.", # depends on pytorch_lightning, not user-facing
"torch.onnx._internal", # depends on onnx-script
"torch._inductor.runtime.triton_helpers", # depends on triton
"torch._inductor.codegen.cuda", # depends on cutlass
]
# See https://github.com/pytorch/pytorch/issues/77801
if not sys.version_info >= (3, 9):
ignored_modules.append("torch.utils.benchmark")
if IS_WINDOWS or IS_MACOS or IS_JETSON:
# Distributed should be importable on Windows (except nn.api.), but not on Mac
if IS_MACOS or IS_JETSON:
ignored_modules.append("torch.distributed.")
else:
ignored_modules.append("torch.distributed.nn.api.")
ignored_modules.append("torch.distributed.optim.")
ignored_modules.append("torch.distributed.rpc.")
ignored_modules.append("torch.testing._internal.dist_utils")
# And these both end up with transitive dependencies on distributed
ignored_modules.append("torch.nn.parallel._replicated_tensor_ddp_interop")
ignored_modules.append("torch.testing._internal.common_fsdp")
ignored_modules.append("torch.testing._internal.common_distributed")
torch_dir = os.path.dirname(torch.__file__)
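# Walk the installed torch package, map every .py file to its module name, and try importing it (skipping ignored modules and __main__ entry points).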
for base, folders, files in os.walk(torch_dir):
prefix = os.path.relpath(base, os.path.dirname(torch_dir)).replace(os.path.sep, ".")
for f in files:
if not f.endswith(".py"):
continue
mod_name = f"{prefix}.{f[:-3]}" if f != "__init__.py" else prefix
# Do not attempt to import executable modules
if f == "__main__.py":
continue
if any(mod_name.startswith(x) for x in ignored_modules):
continue
try:
mod = importlib.import_module(mod_name)
except Exception as e:
raise RuntimeError(f"Failed to import {mod_name}: {e}") from e
self.assertTrue(inspect.ismodule(mod))
@unittest.skipIf(IS_WINDOWS, "TODO enable on Windows")
def test_lazy_imports_are_lazy(self) -> None:
out = self._check_python_output("import sys;import torch;print(all(x not in sys.modules for x in torch._lazy_modules))")
self.assertEqual(out.strip(), "True")
@unittest.skipIf(IS_WINDOWS, "importing torch+CUDA on CPU results in warning")
def test_no_warning_on_import(self) -> None:
out = self._check_python_output("import torch")
self.assertEqual(out, "")
def test_not_import_sympy(self) -> None:
out = self._check_python_output("import torch;import sys;print('sympy' not in sys.modules)")
self.assertEqual(out.strip(), "True",
"PyTorch should not depend on SymPy at import time as importing SymPy is *very* slow.\n"
"See the beginning of the following blog post for how to profile and find which file is importing sympy:\n"
"https://dev-discuss.pytorch.org/t/delving-into-what-happens-when-you-import-torch/1589\n\n"
"If you hit this error, you may want to:\n"
" - Refactor your code to avoid depending on sympy files you may not need to depend\n"
" - Use TYPE_CHECKING if you are using sympy + strings if you are using sympy on type annotations\n"
" - Import things that depend on SymPy locally")
@unittest.skipIf(IS_WINDOWS, "importing torch+CUDA on CPU results in warning")
@parametrize('path', ['torch', 'functorch'])
def test_no_mutate_global_logging_on_import(self, path) -> None:
# Calling logging.basicConfig, among other things, modifies the global
# logging state. It is not OK to modify the global logging state on
# `import torch` (or other submodules we own) because users do not expect it.
expected = 'abcdefghijklmnopqrstuvwxyz'
commands = [
'import logging',
f'import {path}',
'_logger = logging.getLogger("torch_test_testing")',
'logging.root.addHandler(logging.StreamHandler())',
'logging.root.setLevel(logging.INFO)',
f'_logger.info("{expected}")'
]
out = self._check_python_output("; ".join(commands))
self.assertEqual(out.strip(), expected)
class TestOpInfos(TestCase):
def test_sample_input(self) -> None:
a, b, c, d, e = (object() for _ in range(5))
# Construction with natural syntax
s = SampleInput(a, b, c, d=d, e=e)
assert s.input is a
assert s.args == (b, c)
assert s.kwargs == dict(d=d, e=e)
# Construction with explicit args and kwargs
s = SampleInput(a, args=(b,), kwargs=dict(c=c, d=d, e=e))
assert s.input is a
assert s.args == (b,)
assert s.kwargs == dict(c=c, d=d, e=e)
# Construction with a mixed form will error
with self.assertRaises(AssertionError):
s = SampleInput(a, b, c, args=(d, e))
with self.assertRaises(AssertionError):
s = SampleInput(a, b, c, kwargs=dict(d=d, e=e))
with self.assertRaises(AssertionError):
s = SampleInput(a, args=(b, c), d=d, e=e)
with self.assertRaises(AssertionError):
s = SampleInput(a, b, c=c, kwargs=dict(d=d, e=e))
# Mixing metadata into "natural" construction will error
with self.assertRaises(AssertionError):
s = SampleInput(a, b, name="foo")
with self.assertRaises(AssertionError):
s = SampleInput(a, b, output_process_fn_grad=lambda x: x)
with self.assertRaises(AssertionError):
s = SampleInput(a, b, broadcasts_input=True)
# But when only input is given, metadata is allowed for backward
# compatibility
s = SampleInput(a, broadcasts_input=True)
assert s.input is a
assert s.broadcasts_input
def test_sample_input_metadata(self) -> None:
a, b = (object() for _ in range(2))
s1 = SampleInput(a, b=b)
self.assertIs(s1.output_process_fn_grad(None), None)
self.assertFalse(s1.broadcasts_input)
self.assertEqual(s1.name, "")
s2 = s1.with_metadata(
output_process_fn_grad=lambda x: a,
broadcasts_input=True,
name="foo",
)
self.assertIs(s1, s2)
self.assertIs(s2.output_process_fn_grad(None), a)
self.assertTrue(s2.broadcasts_input)
self.assertEqual(s2.name, "foo")
# Tests that validate the various sample generating functions on each OpInfo.
class TestOpInfoSampleFunctions(TestCase):
@ops(op_db, dtypes=OpDTypes.any_one)
def test_opinfo_sample_generators(self, device, dtype, op):
# Test op.sample_inputs doesn't generate multiple samples when called
samples = op.sample_inputs(device, dtype)
self.assertIsInstance(samples, Iterator)
@ops([op for op in op_db if op.reference_inputs_func is not None], dtypes=OpDTypes.any_one)
def test_opinfo_reference_generators(self, device, dtype, op):
# Test op.reference_inputs doesn't generate multiple samples when called
samples = op.reference_inputs(device, dtype)
self.assertIsInstance(samples, Iterator)
@ops([op for op in op_db if op.error_inputs_func is not None], dtypes=OpDTypes.none)
def test_opinfo_error_generators(self, device, op):
# Test op.error_inputs doesn't generate multiple inputs when called
samples = op.error_inputs(device)
self.assertIsInstance(samples, Iterator)
instantiate_device_type_tests(TestOpInfoSampleFunctions, globals())
instantiate_parametrized_tests(TestImports)
if __name__ == '__main__':
run_tests()
|
import collections
import doctest
import functools
import importlib
import inspect
import itertools
import math
import os
import re
import subprocess
import sys
import unittest.mock
from typing import Any, Callable, Iterator, List, Tuple, Generator
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import \
(IS_FBCODE, IS_MACOS, IS_SANDCASTLE, IS_WINDOWS, TestCase, run_tests, skipIfRocm, slowTest,
parametrize, subtest, instantiate_parametrized_tests, dtype_name, TEST_WITH_ROCM)
from torch.testing._internal.common_device_type import \
(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, dtypes,
get_device_type_test_bases, instantiate_device_type_tests, onlyCUDA, onlyNativeDeviceTypes,
deviceCountAtLeast, ops, expectedFailureMeta, OpDTypes)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal import opinfo
from torch.testing._internal.common_dtype import all_types_and_complex_and, floating_types
from torch.testing._internal.common_modules import modules, module_db, ModuleInfo
from torch.testing._internal.opinfo.core import SampleInput, DecorateInfo, OpInfo
|
import collections
import doctest
import functools
import importlib
import inspect
import itertools
import math
import os
import re
import subprocess
import sys
import unittest.mock
from typing import Any, Callable, Iterator, List, Tuple
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import \
(IS_FBCODE, IS_JETSON, IS_MACOS, IS_SANDCASTLE, IS_WINDOWS, TestCase, run_tests, slowTest,
parametrize, subtest, instantiate_parametrized_tests, dtype_name, TEST_WITH_ROCM, decorateIf)
from torch.testing._internal.common_device_type import \
(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, dtypes,
get_device_type_test_bases, instantiate_device_type_tests, onlyCPU, onlyCUDA, onlyNativeDeviceTypes,
deviceCountAtLeast, ops, expectedFailureMeta, OpDTypes)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal import opinfo
from torch.testing._internal.common_dtype import all_types_and_complex_and, floating_types
from torch.testing._internal.common_modules import modules, module_db, ModuleInfo
from torch.testing._internal.opinfo.core import SampleInput, DecorateInfo, OpInfo
import operator
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_testing.py
|
test_name_fn
|
def test_name_fn(self):
class TestParametrized(TestCase):
@parametrize("bias", [False, True], name_fn=lambda b: 'bias' if b else 'no_bias')
def test_custom_names(self, bias):
pass
@parametrize("x", [1, 2], name_fn=str)
@parametrize("y", [3, 4], name_fn=str)
@parametrize("z", [5, 6], name_fn=str)
def test_three_things_composition_custom_names(self, x, y, z):
pass
@parametrize("x,y", [(1, 2), (1, 3), (1, 4)], name_fn=lambda x, y: '{}__{}'.format(x, y))
def test_two_things_custom_names_alternate(self, x, y):
pass
instantiate_parametrized_tests(TestParametrized)
expected_test_names = [
'TestParametrized.test_custom_names_bias',
'TestParametrized.test_custom_names_no_bias',
'TestParametrized.test_three_things_composition_custom_names_1_3_5',
'TestParametrized.test_three_things_composition_custom_names_1_3_6',
'TestParametrized.test_three_things_composition_custom_names_1_4_5',
'TestParametrized.test_three_things_composition_custom_names_1_4_6',
'TestParametrized.test_three_things_composition_custom_names_2_3_5',
'TestParametrized.test_three_things_composition_custom_names_2_3_6',
'TestParametrized.test_three_things_composition_custom_names_2_4_5',
'TestParametrized.test_three_things_composition_custom_names_2_4_6',
'TestParametrized.test_two_things_custom_names_alternate_1__2',
'TestParametrized.test_two_things_custom_names_alternate_1__3',
'TestParametrized.test_two_things_custom_names_alternate_1__4',
]
test_names = _get_test_names_for_test_class(TestParametrized)
self.assertEqual(expected_test_names, test_names)
|
def test_name_fn(self):
class TestParametrized(TestCase):
@parametrize("bias", [False, True], name_fn=lambda b: 'bias' if b else 'no_bias')
def test_custom_names(self, bias):
pass
@parametrize("x", [1, 2], name_fn=str)
@parametrize("y", [3, 4], name_fn=str)
@parametrize("z", [5, 6], name_fn=str)
def test_three_things_composition_custom_names(self, x, y, z):
pass
@parametrize("x,y", [(1, 2), (1, 3), (1, 4)], name_fn=lambda x, y: f'{x}__{y}')
def test_two_things_custom_names_alternate(self, x, y):
pass
instantiate_parametrized_tests(TestParametrized)
expected_test_names = [
'TestParametrized.test_custom_names_bias',
'TestParametrized.test_custom_names_no_bias',
'TestParametrized.test_three_things_composition_custom_names_1_3_5',
'TestParametrized.test_three_things_composition_custom_names_1_3_6',
'TestParametrized.test_three_things_composition_custom_names_1_4_5',
'TestParametrized.test_three_things_composition_custom_names_1_4_6',
'TestParametrized.test_three_things_composition_custom_names_2_3_5',
'TestParametrized.test_three_things_composition_custom_names_2_3_6',
'TestParametrized.test_three_things_composition_custom_names_2_4_5',
'TestParametrized.test_three_things_composition_custom_names_2_4_6',
'TestParametrized.test_two_things_custom_names_alternate_1__2',
'TestParametrized.test_two_things_custom_names_alternate_1__3',
'TestParametrized.test_two_things_custom_names_alternate_1__4',
]
test_names = _get_test_names_for_test_class(TestParametrized)
self.assertEqual(expected_test_names, test_names)
|
import collections
import doctest
import functools
import importlib
import inspect
import itertools
import math
import os
import re
import subprocess
import sys
import unittest.mock
from typing import Any, Callable, Iterator, List, Tuple, Generator
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import \
(IS_FBCODE, IS_MACOS, IS_SANDCASTLE, IS_WINDOWS, TestCase, run_tests, skipIfRocm, slowTest,
parametrize, subtest, instantiate_parametrized_tests, dtype_name, TEST_WITH_ROCM)
from torch.testing._internal.common_device_type import \
(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, dtypes,
get_device_type_test_bases, instantiate_device_type_tests, onlyCUDA, onlyNativeDeviceTypes,
deviceCountAtLeast, ops, expectedFailureMeta, OpDTypes)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal import opinfo
from torch.testing._internal.common_dtype import all_types_and_complex_and, floating_types
from torch.testing._internal.common_modules import modules, module_db, ModuleInfo
from torch.testing._internal.opinfo.core import SampleInput, DecorateInfo, OpInfo
class TestTestParametrization(TestCase):
|
import collections
import doctest
import functools
import importlib
import inspect
import itertools
import math
import os
import re
import subprocess
import sys
import unittest.mock
from typing import Any, Callable, Iterator, List, Tuple
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import \
(IS_FBCODE, IS_JETSON, IS_MACOS, IS_SANDCASTLE, IS_WINDOWS, TestCase, run_tests, slowTest,
parametrize, subtest, instantiate_parametrized_tests, dtype_name, TEST_WITH_ROCM, decorateIf)
from torch.testing._internal.common_device_type import \
(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, dtypes,
get_device_type_test_bases, instantiate_device_type_tests, onlyCPU, onlyCUDA, onlyNativeDeviceTypes,
deviceCountAtLeast, ops, expectedFailureMeta, OpDTypes)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal import opinfo
from torch.testing._internal.common_dtype import all_types_and_complex_and, floating_types
from torch.testing._internal.common_modules import modules, module_db, ModuleInfo
from torch.testing._internal.opinfo.core import SampleInput, DecorateInfo, OpInfo
import operator
class TestTestParametrization(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_testing.py
|
test_unparametrized_names
|
def test_unparametrized_names(self, device):
# This test exists to protect against regressions in device / dtype test naming
# due to parametrization logic.
device = self.device_type
class TestParametrized(TestCase):
def test_device_specific(self, device):
pass
@dtypes(torch.float32, torch.float64)
def test_device_dtype_specific(self, device, dtype):
pass
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()['TestParametrized{}'.format(device.upper())]
expected_test_names = [name.format(device_cls.__name__, device) for name in (
'{}.test_device_dtype_specific_{}_float32',
'{}.test_device_dtype_specific_{}_float64',
'{}.test_device_specific_{}')
]
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(expected_test_names, test_names)
|
def test_unparametrized_names(self, device):
# This test exists to protect against regressions in device / dtype test naming
# due to parametrization logic.
device = self.device_type
class TestParametrized(TestCase):
def test_device_specific(self, device):
pass
@dtypes(torch.float32, torch.float64)
def test_device_dtype_specific(self, device, dtype):
pass
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()[f'TestParametrized{device.upper()}']
expected_test_names = [name.format(device_cls.__name__, device) for name in (
'{}.test_device_dtype_specific_{}_float32',
'{}.test_device_dtype_specific_{}_float64',
'{}.test_device_specific_{}')
]
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(expected_test_names, test_names)
|
import collections
import doctest
import functools
import importlib
import inspect
import itertools
import math
import os
import re
import subprocess
import sys
import unittest.mock
from typing import Any, Callable, Iterator, List, Tuple, Generator
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import \
(IS_FBCODE, IS_MACOS, IS_SANDCASTLE, IS_WINDOWS, TestCase, run_tests, skipIfRocm, slowTest,
parametrize, subtest, instantiate_parametrized_tests, dtype_name, TEST_WITH_ROCM)
from torch.testing._internal.common_device_type import \
(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, dtypes,
get_device_type_test_bases, instantiate_device_type_tests, onlyCUDA, onlyNativeDeviceTypes,
deviceCountAtLeast, ops, expectedFailureMeta, OpDTypes)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal import opinfo
from torch.testing._internal.common_dtype import all_types_and_complex_and, floating_types
from torch.testing._internal.common_modules import modules, module_db, ModuleInfo
from torch.testing._internal.opinfo.core import SampleInput, DecorateInfo, OpInfo
class TestTestParametrizationDeviceType(TestCase):
|
import collections
import doctest
import functools
import importlib
import inspect
import itertools
import math
import os
import re
import subprocess
import sys
import unittest.mock
from typing import Any, Callable, Iterator, List, Tuple
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import \
(IS_FBCODE, IS_JETSON, IS_MACOS, IS_SANDCASTLE, IS_WINDOWS, TestCase, run_tests, slowTest,
parametrize, subtest, instantiate_parametrized_tests, dtype_name, TEST_WITH_ROCM, decorateIf)
from torch.testing._internal.common_device_type import \
(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, dtypes,
get_device_type_test_bases, instantiate_device_type_tests, onlyCPU, onlyCUDA, onlyNativeDeviceTypes,
deviceCountAtLeast, ops, expectedFailureMeta, OpDTypes)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal import opinfo
from torch.testing._internal.common_dtype import all_types_and_complex_and, floating_types
from torch.testing._internal.common_modules import modules, module_db, ModuleInfo
from torch.testing._internal.opinfo.core import SampleInput, DecorateInfo, OpInfo
import operator
class TestTestParametrizationDeviceType(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_testing.py
|
test_empty_param_names
|
def test_empty_param_names(self, device):
# If no param names are passed, ensure things still work without parametrization.
device = self.device_type
class TestParametrized(TestCase):
@parametrize("", [])
def test_foo(self, device):
pass
@parametrize("", range(5))
def test_bar(self, device):
pass
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()['TestParametrized{}'.format(device.upper())]
expected_test_names = [name.format(device_cls.__name__, device) for name in (
'{}.test_bar_{}',
'{}.test_foo_{}')
]
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(expected_test_names, test_names)
|
def test_empty_param_names(self, device):
# If no param names are passed, ensure things still work without parametrization.
device = self.device_type
class TestParametrized(TestCase):
@parametrize("", [])
def test_foo(self, device):
pass
@parametrize("", range(5))
def test_bar(self, device):
pass
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()[f'TestParametrized{device.upper()}']
expected_test_names = [name.format(device_cls.__name__, device) for name in (
'{}.test_bar_{}',
'{}.test_foo_{}')
]
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(expected_test_names, test_names)
|
import collections
import doctest
import functools
import importlib
import inspect
import itertools
import math
import os
import re
import subprocess
import sys
import unittest.mock
from typing import Any, Callable, Iterator, List, Tuple, Generator
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import \
(IS_FBCODE, IS_MACOS, IS_SANDCASTLE, IS_WINDOWS, TestCase, run_tests, skipIfRocm, slowTest,
parametrize, subtest, instantiate_parametrized_tests, dtype_name, TEST_WITH_ROCM)
from torch.testing._internal.common_device_type import \
(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, dtypes,
get_device_type_test_bases, instantiate_device_type_tests, onlyCUDA, onlyNativeDeviceTypes,
deviceCountAtLeast, ops, expectedFailureMeta, OpDTypes)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal import opinfo
from torch.testing._internal.common_dtype import all_types_and_complex_and, floating_types
from torch.testing._internal.common_modules import modules, module_db, ModuleInfo
from torch.testing._internal.opinfo.core import SampleInput, DecorateInfo, OpInfo
class TestTestParametrizationDeviceType(TestCase):
|
import collections
import doctest
import functools
import importlib
import inspect
import itertools
import math
import os
import re
import subprocess
import sys
import unittest.mock
from typing import Any, Callable, Iterator, List, Tuple
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import \
(IS_FBCODE, IS_JETSON, IS_MACOS, IS_SANDCASTLE, IS_WINDOWS, TestCase, run_tests, slowTest,
parametrize, subtest, instantiate_parametrized_tests, dtype_name, TEST_WITH_ROCM, decorateIf)
from torch.testing._internal.common_device_type import \
(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, dtypes,
get_device_type_test_bases, instantiate_device_type_tests, onlyCPU, onlyCUDA, onlyNativeDeviceTypes,
deviceCountAtLeast, ops, expectedFailureMeta, OpDTypes)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal import opinfo
from torch.testing._internal.common_dtype import all_types_and_complex_and, floating_types
from torch.testing._internal.common_modules import modules, module_db, ModuleInfo
from torch.testing._internal.opinfo.core import SampleInput, DecorateInfo, OpInfo
import operator
class TestTestParametrizationDeviceType(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_testing.py
|
test_default_name_non_primitive
|
def test_default_name_non_primitive(self, device):
device = self.device_type
class TestParametrized(TestCase):
@parametrize("x", [1, .5, "foo", object()])
def test_default_names(self, device, x):
pass
@parametrize("x,y", [(1, object()), (object(), .5), (object(), object())])
def test_two_things_default_names(self, device, x, y):
pass
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()[f'TestParametrized{device.upper()}']
expected_test_names = sorted(name.format(device_cls.__name__, device) for name in (
'{}.test_default_names_x_1_{}',
'{}.test_default_names_x_0_5_{}',
'{}.test_default_names_x_foo_{}',
'{}.test_default_names_x3_{}',
'{}.test_two_things_default_names_x_1_y0_{}',
'{}.test_two_things_default_names_x1_y_0_5_{}',
'{}.test_two_things_default_names_x2_y2_{}')
)
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(expected_test_names, test_names)
|
import collections
import doctest
import functools
import importlib
import inspect
import itertools
import math
import os
import re
import subprocess
import sys
import unittest.mock
from typing import Any, Callable, Iterator, List, Tuple
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import \
(IS_FBCODE, IS_JETSON, IS_MACOS, IS_SANDCASTLE, IS_WINDOWS, TestCase, run_tests, slowTest,
parametrize, subtest, instantiate_parametrized_tests, dtype_name, TEST_WITH_ROCM, decorateIf)
from torch.testing._internal.common_device_type import \
(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, dtypes,
get_device_type_test_bases, instantiate_device_type_tests, onlyCPU, onlyCUDA, onlyNativeDeviceTypes,
deviceCountAtLeast, ops, expectedFailureMeta, OpDTypes)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal import opinfo
from torch.testing._internal.common_dtype import all_types_and_complex_and, floating_types
from torch.testing._internal.common_modules import modules, module_db, ModuleInfo
from torch.testing._internal.opinfo.core import SampleInput, DecorateInfo, OpInfo
import operator
class TestTestParametrizationDeviceType(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_testing.py
|
test_name_fn
|
def test_name_fn(self):
class TestParametrized(TestCase):
@parametrize("bias", [False, True], name_fn=lambda b: 'bias' if b else 'no_bias')
def test_custom_names(self, bias):
pass
@parametrize("x", [1, 2], name_fn=str)
@parametrize("y", [3, 4], name_fn=str)
@parametrize("z", [5, 6], name_fn=str)
def test_three_things_composition_custom_names(self, x, y, z):
pass
@parametrize("x,y", [(1, 2), (1, 3), (1, 4)], name_fn=lambda x, y: '{}__{}'.format(x, y))
def test_two_things_custom_names_alternate(self, x, y):
pass
instantiate_parametrized_tests(TestParametrized)
expected_test_names = [
'TestParametrized.test_custom_names_bias',
'TestParametrized.test_custom_names_no_bias',
'TestParametrized.test_three_things_composition_custom_names_1_3_5',
'TestParametrized.test_three_things_composition_custom_names_1_3_6',
'TestParametrized.test_three_things_composition_custom_names_1_4_5',
'TestParametrized.test_three_things_composition_custom_names_1_4_6',
'TestParametrized.test_three_things_composition_custom_names_2_3_5',
'TestParametrized.test_three_things_composition_custom_names_2_3_6',
'TestParametrized.test_three_things_composition_custom_names_2_4_5',
'TestParametrized.test_three_things_composition_custom_names_2_4_6',
'TestParametrized.test_two_things_custom_names_alternate_1__2',
'TestParametrized.test_two_things_custom_names_alternate_1__3',
'TestParametrized.test_two_things_custom_names_alternate_1__4',
]
test_names = _get_test_names_for_test_class(TestParametrized)
self.assertEqual(expected_test_names, test_names)
|
def test_name_fn(self):
class TestParametrized(TestCase):
@parametrize("bias", [False, True], name_fn=lambda b: 'bias' if b else 'no_bias')
def test_custom_names(self, bias):
pass
@parametrize("x", [1, 2], name_fn=str)
@parametrize("y", [3, 4], name_fn=str)
@parametrize("z", [5, 6], name_fn=str)
def test_three_things_composition_custom_names(self, x, y, z):
pass
@parametrize("x,y", [(1, 2), (1, 3), (1, 4)], name_fn=lambda x, y: f'{x}__{y}')
def test_two_things_custom_names_alternate(self, x, y):
pass
instantiate_parametrized_tests(TestParametrized)
expected_test_names = [
'TestParametrized.test_custom_names_bias',
'TestParametrized.test_custom_names_no_bias',
'TestParametrized.test_three_things_composition_custom_names_1_3_5',
'TestParametrized.test_three_things_composition_custom_names_1_3_6',
'TestParametrized.test_three_things_composition_custom_names_1_4_5',
'TestParametrized.test_three_things_composition_custom_names_1_4_6',
'TestParametrized.test_three_things_composition_custom_names_2_3_5',
'TestParametrized.test_three_things_composition_custom_names_2_3_6',
'TestParametrized.test_three_things_composition_custom_names_2_4_5',
'TestParametrized.test_three_things_composition_custom_names_2_4_6',
'TestParametrized.test_two_things_custom_names_alternate_1__2',
'TestParametrized.test_two_things_custom_names_alternate_1__3',
'TestParametrized.test_two_things_custom_names_alternate_1__4',
]
test_names = _get_test_names_for_test_class(TestParametrized)
self.assertEqual(expected_test_names, test_names)
|
import collections
import doctest
import functools
import importlib
import inspect
import itertools
import math
import os
import re
import subprocess
import sys
import unittest.mock
from typing import Any, Callable, Iterator, List, Tuple, Generator
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import \
(IS_FBCODE, IS_MACOS, IS_SANDCASTLE, IS_WINDOWS, TestCase, run_tests, skipIfRocm, slowTest,
parametrize, subtest, instantiate_parametrized_tests, dtype_name, TEST_WITH_ROCM)
from torch.testing._internal.common_device_type import \
(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, dtypes,
get_device_type_test_bases, instantiate_device_type_tests, onlyCUDA, onlyNativeDeviceTypes,
deviceCountAtLeast, ops, expectedFailureMeta, OpDTypes)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal import opinfo
from torch.testing._internal.common_dtype import all_types_and_complex_and, floating_types
from torch.testing._internal.common_modules import modules, module_db, ModuleInfo
from torch.testing._internal.opinfo.core import SampleInput, DecorateInfo, OpInfo
class TestTestParametrization(TestCase):
|
import collections
import doctest
import functools
import importlib
import inspect
import itertools
import math
import os
import re
import subprocess
import sys
import unittest.mock
from typing import Any, Callable, Iterator, List, Tuple
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import \
(IS_FBCODE, IS_JETSON, IS_MACOS, IS_SANDCASTLE, IS_WINDOWS, TestCase, run_tests, slowTest,
parametrize, subtest, instantiate_parametrized_tests, dtype_name, TEST_WITH_ROCM, decorateIf)
from torch.testing._internal.common_device_type import \
(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, dtypes,
get_device_type_test_bases, instantiate_device_type_tests, onlyCPU, onlyCUDA, onlyNativeDeviceTypes,
deviceCountAtLeast, ops, expectedFailureMeta, OpDTypes)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal import opinfo
from torch.testing._internal.common_dtype import all_types_and_complex_and, floating_types
from torch.testing._internal.common_modules import modules, module_db, ModuleInfo
from torch.testing._internal.opinfo.core import SampleInput, DecorateInfo, OpInfo
import operator
class TestTestParametrization(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_testing.py
|
test_ops_composition_names
|
def test_ops_composition_names(self, device):
device = self.device_type
class TestParametrized(TestCase):
@ops(op_db)
@parametrize("flag", [False, True], lambda f: 'flag_enabled' if f else 'flag_disabled')
def test_op_parametrized(self, device, dtype, op, flag):
pass
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()['TestParametrized{}'.format(device.upper())]
expected_test_names = []
for op in op_db:
for dtype in op.supported_dtypes(torch.device(device).type):
for flag_part in ('flag_disabled', 'flag_enabled'):
expected_name = '{}.test_op_parametrized_{}_{}_{}_{}'.format(
device_cls.__name__, op.formatted_name, flag_part, device, dtype_name(dtype))
expected_test_names.append(expected_name)
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(sorted(expected_test_names), sorted(test_names))
|
def test_ops_composition_names(self, device):
device = self.device_type
class TestParametrized(TestCase):
@ops(op_db)
@parametrize("flag", [False, True], lambda f: 'flag_enabled' if f else 'flag_disabled')
def test_op_parametrized(self, device, dtype, op, flag):
pass
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()[f'TestParametrized{device.upper()}']
expected_test_names = []
for op in op_db:
for dtype in op.supported_dtypes(torch.device(device).type):
for flag_part in ('flag_disabled', 'flag_enabled'):
expected_name = f'{device_cls.__name__}.test_op_parametrized_{op.formatted_name}_{flag_part}_{device}_{dtype_name(dtype)}' # noqa: B950
expected_test_names.append(expected_name)
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(sorted(expected_test_names), sorted(test_names))
|
import collections
import doctest
import functools
import importlib
import inspect
import itertools
import math
import os
import re
import subprocess
import sys
import unittest.mock
from typing import Any, Callable, Iterator, List, Tuple, Generator
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import \
(IS_FBCODE, IS_MACOS, IS_SANDCASTLE, IS_WINDOWS, TestCase, run_tests, skipIfRocm, slowTest,
parametrize, subtest, instantiate_parametrized_tests, dtype_name, TEST_WITH_ROCM)
from torch.testing._internal.common_device_type import \
(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, dtypes,
get_device_type_test_bases, instantiate_device_type_tests, onlyCUDA, onlyNativeDeviceTypes,
deviceCountAtLeast, ops, expectedFailureMeta, OpDTypes)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal import opinfo
from torch.testing._internal.common_dtype import all_types_and_complex_and, floating_types
from torch.testing._internal.common_modules import modules, module_db, ModuleInfo
from torch.testing._internal.opinfo.core import SampleInput, DecorateInfo, OpInfo
class TestTestParametrizationDeviceType(TestCase):
|
import collections
import doctest
import functools
import importlib
import inspect
import itertools
import math
import os
import re
import subprocess
import sys
import unittest.mock
from typing import Any, Callable, Iterator, List, Tuple
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import \
(IS_FBCODE, IS_JETSON, IS_MACOS, IS_SANDCASTLE, IS_WINDOWS, TestCase, run_tests, slowTest,
parametrize, subtest, instantiate_parametrized_tests, dtype_name, TEST_WITH_ROCM, decorateIf)
from torch.testing._internal.common_device_type import \
(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, dtypes,
get_device_type_test_bases, instantiate_device_type_tests, onlyCPU, onlyCUDA, onlyNativeDeviceTypes,
deviceCountAtLeast, ops, expectedFailureMeta, OpDTypes)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal import opinfo
from torch.testing._internal.common_dtype import all_types_and_complex_and, floating_types
from torch.testing._internal.common_modules import modules, module_db, ModuleInfo
from torch.testing._internal.opinfo.core import SampleInput, DecorateInfo, OpInfo
import operator
class TestTestParametrizationDeviceType(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
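As an illustrative, self-contained sketch (not part of the dataset rows above), the @parametrize name_fn pattern exercised by these composition tests can be reproduced standalone; the class name ExampleFlagTests is invented for demonstration.
from torch.testing._internal.common_utils import (
    TestCase, parametrize, instantiate_parametrized_tests, run_tests,
)

class ExampleFlagTests(TestCase):
    # The third argument to @parametrize is a name_fn: it maps each value to the
    # suffix used in the generated test name, producing test_flag_flag_disabled
    # and test_flag_flag_enabled after instantiation.
    @parametrize("flag", [False, True], lambda f: 'flag_enabled' if f else 'flag_disabled')
    def test_flag(self, flag):
        self.assertIn(flag, (False, True))

instantiate_parametrized_tests(ExampleFlagTests)

if __name__ == '__main__':
    run_tests()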
torch
|
test/test_testing.py
|
test_modules_composition_names
|
def test_modules_composition_names(self, device):
device = self.device_type
class TestParametrized(TestCase):
@modules(module_db)
@parametrize("flag", [False, True], lambda f: 'flag_enabled' if f else 'flag_disabled')
def test_module_parametrized(self, device, dtype, module_info, training, flag):
pass
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()['TestParametrized{}'.format(device.upper())]
expected_test_names = []
for module_info in module_db:
for dtype in module_info.dtypes:
for flag_part in ('flag_disabled', 'flag_enabled'):
expected_train_modes = (
['train_mode', 'eval_mode'] if module_info.train_and_eval_differ else [''])
for training_part in expected_train_modes:
expected_name = '{}.test_module_parametrized_{}{}_{}_{}_{}'.format(
device_cls.__name__, module_info.formatted_name,
'_' + training_part if len(training_part) > 0 else '',
flag_part, device, dtype_name(dtype))
expected_test_names.append(expected_name)
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(sorted(expected_test_names), sorted(test_names))
|
def test_modules_composition_names(self, device):
device = self.device_type
class TestParametrized(TestCase):
@modules(module_db)
@parametrize("flag", [False, True], lambda f: 'flag_enabled' if f else 'flag_disabled')
def test_module_parametrized(self, device, dtype, module_info, training, flag):
pass
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()[f'TestParametrized{device.upper()}']
expected_test_names = []
for module_info in module_db:
for dtype in module_info.dtypes:
for flag_part in ('flag_disabled', 'flag_enabled'):
expected_train_modes = (
['train_mode', 'eval_mode'] if module_info.train_and_eval_differ else [''])
for training_part in expected_train_modes:
expected_name = '{}.test_module_parametrized_{}{}_{}_{}_{}'.format(
device_cls.__name__, module_info.formatted_name,
'_' + training_part if len(training_part) > 0 else '',
flag_part, device, dtype_name(dtype))
expected_test_names.append(expected_name)
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(sorted(expected_test_names), sorted(test_names))
|
import collections
import doctest
import functools
import importlib
import inspect
import itertools
import math
import os
import re
import subprocess
import sys
import unittest.mock
from typing import Any, Callable, Iterator, List, Tuple, Generator
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import \
(IS_FBCODE, IS_MACOS, IS_SANDCASTLE, IS_WINDOWS, TestCase, run_tests, skipIfRocm, slowTest,
parametrize, subtest, instantiate_parametrized_tests, dtype_name, TEST_WITH_ROCM)
from torch.testing._internal.common_device_type import \
(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, dtypes,
get_device_type_test_bases, instantiate_device_type_tests, onlyCUDA, onlyNativeDeviceTypes,
deviceCountAtLeast, ops, expectedFailureMeta, OpDTypes)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal import opinfo
from torch.testing._internal.common_dtype import all_types_and_complex_and, floating_types
from torch.testing._internal.common_modules import modules, module_db, ModuleInfo
from torch.testing._internal.opinfo.core import SampleInput, DecorateInfo, OpInfo
class TestTestParametrizationDeviceType(TestCase):
|
import collections
import doctest
import functools
import importlib
import inspect
import itertools
import math
import os
import re
import subprocess
import sys
import unittest.mock
from typing import Any, Callable, Iterator, List, Tuple
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import \
(IS_FBCODE, IS_JETSON, IS_MACOS, IS_SANDCASTLE, IS_WINDOWS, TestCase, run_tests, slowTest,
parametrize, subtest, instantiate_parametrized_tests, dtype_name, TEST_WITH_ROCM, decorateIf)
from torch.testing._internal.common_device_type import \
(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, dtypes,
get_device_type_test_bases, instantiate_device_type_tests, onlyCPU, onlyCUDA, onlyNativeDeviceTypes,
deviceCountAtLeast, ops, expectedFailureMeta, OpDTypes)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal import opinfo
from torch.testing._internal.common_dtype import all_types_and_complex_and, floating_types
from torch.testing._internal.common_modules import modules, module_db, ModuleInfo
from torch.testing._internal.opinfo.core import SampleInput, DecorateInfo, OpInfo
import operator
class TestTestParametrizationDeviceType(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_testing.py
|
test_ops_decorator_applies_op_and_param_specific_decorators
|
def test_ops_decorator_applies_op_and_param_specific_decorators(self, device):
# Test that decorators can be applied on a per-op / per-param basis.
# Create a test op, OpInfo entry, and decorator to apply.
def test_op(x):
return -x
def test_dec(func):
func._decorator_applied = True
return func
test_op_info = OpInfo(
'test_op',
op=test_op,
dtypes=floating_types(),
sample_inputs_func=lambda _: [],
decorators=[
DecorateInfo(test_dec, 'TestParametrized', 'test_op_param',
device_type='cpu', dtypes=[torch.float64],
active_if=lambda p: p['x'] == 2)
])
class TestParametrized(TestCase):
@ops(op_db + [test_op_info])
@parametrize("x", [2, 3])
def test_op_param(self, device, dtype, op, x):
pass
@ops(op_db + [test_op_info])
@parametrize("y", [
subtest(4),
subtest(5, decorators=[test_dec])])
def test_other(self, device, dtype, op, y):
pass
device = self.device_type
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()['TestParametrized{}'.format(device.upper())]
for test_func, name in _get_test_funcs_for_test_class(device_cls):
should_apply = (name == 'test_op_param_test_op_x_2_cpu_float64' or
('test_other' in name and 'y_5' in name))
self.assertEqual(hasattr(test_func, '_decorator_applied'), should_apply)
|
def test_ops_decorator_applies_op_and_param_specific_decorators(self, device):
# Test that decorators can be applied on a per-op / per-param basis.
# Create a test op, OpInfo entry, and decorator to apply.
def test_op(x):
return -x
def test_dec(func):
func._decorator_applied = True
return func
test_op_info = OpInfo(
'test_op',
op=test_op,
dtypes=floating_types(),
sample_inputs_func=lambda _: [],
decorators=[
DecorateInfo(test_dec, 'TestParametrized', 'test_op_param',
device_type='cpu', dtypes=[torch.float64],
active_if=lambda p: p['x'] == 2)
])
class TestParametrized(TestCase):
@ops(op_db + [test_op_info])
@parametrize("x", [2, 3])
def test_op_param(self, device, dtype, op, x):
pass
@ops(op_db + [test_op_info])
@parametrize("y", [
subtest(4),
subtest(5, decorators=[test_dec])])
def test_other(self, device, dtype, op, y):
pass
@decorateIf(test_dec, lambda p: p['dtype'] == torch.int16)
@ops(op_db)
def test_three(self, device, dtype, op):
pass
device = self.device_type
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()[f'TestParametrized{device.upper()}']
for test_func, name in _get_test_funcs_for_test_class(device_cls):
should_apply = (name == 'test_op_param_test_op_x_2_cpu_float64' or
('test_other' in name and 'y_5' in name) or
('test_three' in name and name.endswith('_int16')))
self.assertEqual(hasattr(test_func, '_decorator_applied'), should_apply)
|
import collections
import doctest
import functools
import importlib
import inspect
import itertools
import math
import os
import re
import subprocess
import sys
import unittest.mock
from typing import Any, Callable, Iterator, List, Tuple, Generator
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import \
(IS_FBCODE, IS_MACOS, IS_SANDCASTLE, IS_WINDOWS, TestCase, run_tests, skipIfRocm, slowTest,
parametrize, subtest, instantiate_parametrized_tests, dtype_name, TEST_WITH_ROCM)
from torch.testing._internal.common_device_type import \
(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, dtypes,
get_device_type_test_bases, instantiate_device_type_tests, onlyCUDA, onlyNativeDeviceTypes,
deviceCountAtLeast, ops, expectedFailureMeta, OpDTypes)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal import opinfo
from torch.testing._internal.common_dtype import all_types_and_complex_and, floating_types
from torch.testing._internal.common_modules import modules, module_db, ModuleInfo
from torch.testing._internal.opinfo.core import SampleInput, DecorateInfo, OpInfo
class TestTestParametrizationDeviceType(TestCase):
|
import collections
import doctest
import functools
import importlib
import inspect
import itertools
import math
import os
import re
import subprocess
import sys
import unittest.mock
from typing import Any, Callable, Iterator, List, Tuple
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import \
(IS_FBCODE, IS_JETSON, IS_MACOS, IS_SANDCASTLE, IS_WINDOWS, TestCase, run_tests, slowTest,
parametrize, subtest, instantiate_parametrized_tests, dtype_name, TEST_WITH_ROCM, decorateIf)
from torch.testing._internal.common_device_type import \
(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, dtypes,
get_device_type_test_bases, instantiate_device_type_tests, onlyCPU, onlyCUDA, onlyNativeDeviceTypes,
deviceCountAtLeast, ops, expectedFailureMeta, OpDTypes)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal import opinfo
from torch.testing._internal.common_dtype import all_types_and_complex_and, floating_types
from torch.testing._internal.common_modules import modules, module_db, ModuleInfo
from torch.testing._internal.opinfo.core import SampleInput, DecorateInfo, OpInfo
import operator
class TestTestParametrizationDeviceType(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
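A minimal, hypothetical sketch (separate from the dataset rows) of the decorateIf mechanism that the added test_three cases above rely on; ExampleDecorateIf and mark are invented names for illustration.
from torch.testing._internal.common_utils import (
    TestCase, parametrize, decorateIf, instantiate_parametrized_tests,
)

def mark(func):
    # Marker decorator so decorated instances can be identified afterwards.
    func._decorator_applied = True
    return func

class ExampleDecorateIf(TestCase):
    # decorateIf applies the decorator only to parametrizations whose params
    # dict satisfies the predicate.
    @decorateIf(mark, lambda params: params["x"] == 1)
    @parametrize("x", range(3))
    def test_marked(self, x):
        pass

instantiate_parametrized_tests(ExampleDecorateIf)
# After instantiation, only test_marked_x_1 carries _decorator_applied.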
torch
|
test/test_testing.py
|
test_three
|
device = self.device_type
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()['TestParametrized{}'.format(device.upper())]
for test_func, name in _get_test_funcs_for_test_class(device_cls):
should_apply = (name == 'test_op_param_test_op_x_2_cpu_float64' or
('test_other' in name and 'y_5' in name))
self.assertEqual(hasattr(test_func, '_decorator_applied'), should_apply)
|
def test_three(self, device, dtype, op):
pass
|
import collections
import doctest
import functools
import importlib
import inspect
import itertools
import math
import os
import re
import subprocess
import sys
import unittest.mock
from typing import Any, Callable, Iterator, List, Tuple
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import \
(IS_FBCODE, IS_JETSON, IS_MACOS, IS_SANDCASTLE, IS_WINDOWS, TestCase, run_tests, slowTest,
parametrize, subtest, instantiate_parametrized_tests, dtype_name, TEST_WITH_ROCM, decorateIf)
from torch.testing._internal.common_device_type import \
(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, dtypes,
get_device_type_test_bases, instantiate_device_type_tests, onlyCPU, onlyCUDA, onlyNativeDeviceTypes,
deviceCountAtLeast, ops, expectedFailureMeta, OpDTypes)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal import opinfo
from torch.testing._internal.common_dtype import all_types_and_complex_and, floating_types
from torch.testing._internal.common_modules import modules, module_db, ModuleInfo
from torch.testing._internal.opinfo.core import SampleInput, DecorateInfo, OpInfo
import operator
class TestParametrized(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/test_testing.py
|
test_modules_decorator_applies_module_and_param_specific_decorators
|
def test_modules_decorator_applies_module_and_param_specific_decorators(self, device):
# Test that decorators can be applied on a per-module / per-param basis.
# Create a test module, ModuleInfo entry, and decorator to apply.
class TestModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.x = torch.nn.Parameter(torch.randn(3))
def forward(self, y):
return self.x + y
def test_dec(func):
func._decorator_applied = True
return func
test_module_info = ModuleInfo(
TestModule,
module_inputs_func=lambda _: [],
decorators=[
DecorateInfo(test_dec, 'TestParametrized', 'test_module_param',
device_type='cpu', dtypes=[torch.float64],
active_if=lambda p: p['x'] == 2)
])
class TestParametrized(TestCase):
@modules(module_db + [test_module_info])
@parametrize("x", [2, 3])
def test_module_param(self, device, dtype, module_info, training, x):
pass
@modules(module_db + [test_module_info])
@parametrize("y", [
subtest(4),
subtest(5, decorators=[test_dec])])
def test_other(self, device, dtype, module_info, training, y):
pass
device = self.device_type
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()['TestParametrized{}'.format(device.upper())]
for test_func, name in _get_test_funcs_for_test_class(device_cls):
should_apply = (name == 'test_module_param_TestModule_x_2_cpu_float64' or
('test_other' in name and 'y_5' in name))
self.assertEqual(hasattr(test_func, '_decorator_applied'), should_apply)
|
def test_modules_decorator_applies_module_and_param_specific_decorators(self, device):
# Test that decorators can be applied on a per-module / per-param basis.
# Create a test module, ModuleInfo entry, and decorator to apply.
class TestModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.x = torch.nn.Parameter(torch.randn(3))
def forward(self, y):
return self.x + y
def test_dec(func):
func._decorator_applied = True
return func
test_module_info = ModuleInfo(
TestModule,
module_inputs_func=lambda _: [],
decorators=[
DecorateInfo(test_dec, 'TestParametrized', 'test_module_param',
device_type='cpu', dtypes=[torch.float64],
active_if=lambda p: p['x'] == 2)
])
class TestParametrized(TestCase):
@modules(module_db + [test_module_info])
@parametrize("x", [2, 3])
def test_module_param(self, device, dtype, module_info, training, x):
pass
@modules(module_db + [test_module_info])
@parametrize("y", [
subtest(4),
subtest(5, decorators=[test_dec])])
def test_other(self, device, dtype, module_info, training, y):
pass
@decorateIf(test_dec, lambda p: p['dtype'] == torch.float64)
@modules(module_db)
def test_three(self, device, dtype, module_info):
pass
device = self.device_type
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()[f'TestParametrized{device.upper()}']
for test_func, name in _get_test_funcs_for_test_class(device_cls):
should_apply = (name == 'test_module_param_TestModule_x_2_cpu_float64' or
('test_other' in name and 'y_5' in name) or
('test_three' in name and name.endswith('float64')))
self.assertEqual(hasattr(test_func, '_decorator_applied'), should_apply)
|
import collections
import doctest
import functools
import importlib
import inspect
import itertools
import math
import os
import re
import subprocess
import sys
import unittest.mock
from typing import Any, Callable, Iterator, List, Tuple, Generator
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import \
(IS_FBCODE, IS_MACOS, IS_SANDCASTLE, IS_WINDOWS, TestCase, run_tests, skipIfRocm, slowTest,
parametrize, subtest, instantiate_parametrized_tests, dtype_name, TEST_WITH_ROCM)
from torch.testing._internal.common_device_type import \
(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, dtypes,
get_device_type_test_bases, instantiate_device_type_tests, onlyCUDA, onlyNativeDeviceTypes,
deviceCountAtLeast, ops, expectedFailureMeta, OpDTypes)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal import opinfo
from torch.testing._internal.common_dtype import all_types_and_complex_and, floating_types
from torch.testing._internal.common_modules import modules, module_db, ModuleInfo
from torch.testing._internal.opinfo.core import SampleInput, DecorateInfo, OpInfo
class TestTestParametrizationDeviceType(TestCase):
|
import collections
import doctest
import functools
import importlib
import inspect
import itertools
import math
import os
import re
import subprocess
import sys
import unittest.mock
from typing import Any, Callable, Iterator, List, Tuple
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import \
(IS_FBCODE, IS_JETSON, IS_MACOS, IS_SANDCASTLE, IS_WINDOWS, TestCase, run_tests, slowTest,
parametrize, subtest, instantiate_parametrized_tests, dtype_name, TEST_WITH_ROCM, decorateIf)
from torch.testing._internal.common_device_type import \
(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, dtypes,
get_device_type_test_bases, instantiate_device_type_tests, onlyCPU, onlyCUDA, onlyNativeDeviceTypes,
deviceCountAtLeast, ops, expectedFailureMeta, OpDTypes)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal import opinfo
from torch.testing._internal.common_dtype import all_types_and_complex_and, floating_types
from torch.testing._internal.common_modules import modules, module_db, ModuleInfo
from torch.testing._internal.opinfo.core import SampleInput, DecorateInfo, OpInfo
import operator
class TestTestParametrizationDeviceType(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_testing.py
|
__init__
|
def __init__(self):
super().__init__()
self.x = torch.nn.Parameter(torch.randn(3))
|
def __init__(self) -> None:
super().__init__()
self.x = torch.nn.Parameter(torch.randn(3))
|
import collections
import doctest
import functools
import importlib
import inspect
import itertools
import math
import os
import re
import subprocess
import sys
import unittest.mock
from typing import Any, Callable, Iterator, List, Tuple, Generator
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import \
(IS_FBCODE, IS_MACOS, IS_SANDCASTLE, IS_WINDOWS, TestCase, run_tests, skipIfRocm, slowTest,
parametrize, subtest, instantiate_parametrized_tests, dtype_name, TEST_WITH_ROCM)
from torch.testing._internal.common_device_type import \
(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, dtypes,
get_device_type_test_bases, instantiate_device_type_tests, onlyCUDA, onlyNativeDeviceTypes,
deviceCountAtLeast, ops, expectedFailureMeta, OpDTypes)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal import opinfo
from torch.testing._internal.common_dtype import all_types_and_complex_and, floating_types
from torch.testing._internal.common_modules import modules, module_db, ModuleInfo
from torch.testing._internal.opinfo.core import SampleInput, DecorateInfo, OpInfo
class TestModule(torch.nn.Module):
|
import collections
import doctest
import functools
import importlib
import inspect
import itertools
import math
import os
import re
import subprocess
import sys
import unittest.mock
from typing import Any, Callable, Iterator, List, Tuple
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import \
(IS_FBCODE, IS_JETSON, IS_MACOS, IS_SANDCASTLE, IS_WINDOWS, TestCase, run_tests, slowTest,
parametrize, subtest, instantiate_parametrized_tests, dtype_name, TEST_WITH_ROCM, decorateIf)
from torch.testing._internal.common_device_type import \
(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, dtypes,
get_device_type_test_bases, instantiate_device_type_tests, onlyCPU, onlyCUDA, onlyNativeDeviceTypes,
deviceCountAtLeast, ops, expectedFailureMeta, OpDTypes)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal import opinfo
from torch.testing._internal.common_dtype import all_types_and_complex_and, floating_types
from torch.testing._internal.common_modules import modules, module_db, ModuleInfo
from torch.testing._internal.opinfo.core import SampleInput, DecorateInfo, OpInfo
import operator
class TestModule(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_testing.py
|
test_three
|
device = self.device_type
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()['TestParametrized{}'.format(device.upper())]
for test_func, name in _get_test_funcs_for_test_class(device_cls):
should_apply = (name == 'test_module_param_TestModule_x_2_cpu_float64' or
('test_other' in name and 'y_5' in name))
|
def test_three(self, device, dtype, op):
pass
|
import collections
import doctest
import functools
import importlib
import inspect
import itertools
import math
import os
import re
import subprocess
import sys
import unittest.mock
from typing import Any, Callable, Iterator, List, Tuple
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import \
(IS_FBCODE, IS_JETSON, IS_MACOS, IS_SANDCASTLE, IS_WINDOWS, TestCase, run_tests, slowTest,
parametrize, subtest, instantiate_parametrized_tests, dtype_name, TEST_WITH_ROCM, decorateIf)
from torch.testing._internal.common_device_type import \
(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, dtypes,
get_device_type_test_bases, instantiate_device_type_tests, onlyCPU, onlyCUDA, onlyNativeDeviceTypes,
deviceCountAtLeast, ops, expectedFailureMeta, OpDTypes)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal import opinfo
from torch.testing._internal.common_dtype import all_types_and_complex_and, floating_types
from torch.testing._internal.common_modules import modules, module_db, ModuleInfo
from torch.testing._internal.opinfo.core import SampleInput, DecorateInfo, OpInfo
import operator
class TestParametrized(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/test_testing.py
|
test_param_specific_decoration
|
def test_param_specific_decoration(self, device):
def test_dec(func):
func._decorator_applied = True
return func
class TestParametrized(TestCase):
@decorateIf(test_dec, lambda params: params["x"] == 1 and params["y"])
@parametrize("x", range(5))
@parametrize("y", [False, True])
def test_param(self, x, y):
pass
device = self.device_type
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()[f'TestParametrized{device.upper()}']
for test_func, name in _get_test_funcs_for_test_class(device_cls):
should_apply = ('test_param_x_1_y_True' in name)
self.assertEqual(hasattr(test_func, '_decorator_applied'), should_apply)
|
import collections
import doctest
import functools
import importlib
import inspect
import itertools
import math
import os
import re
import subprocess
import sys
import unittest.mock
from typing import Any, Callable, Iterator, List, Tuple
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import \
(IS_FBCODE, IS_JETSON, IS_MACOS, IS_SANDCASTLE, IS_WINDOWS, TestCase, run_tests, slowTest,
parametrize, subtest, instantiate_parametrized_tests, dtype_name, TEST_WITH_ROCM, decorateIf)
from torch.testing._internal.common_device_type import \
(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, dtypes,
get_device_type_test_bases, instantiate_device_type_tests, onlyCPU, onlyCUDA, onlyNativeDeviceTypes,
deviceCountAtLeast, ops, expectedFailureMeta, OpDTypes)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal import opinfo
from torch.testing._internal.common_dtype import all_types_and_complex_and, floating_types
from torch.testing._internal.common_modules import modules, module_db, ModuleInfo
from torch.testing._internal.opinfo.core import SampleInput, DecorateInfo, OpInfo
import operator
class TestTestParametrizationDeviceType(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/test_testing.py
|
test_dtypes_composition_valid
|
def test_dtypes_composition_valid(self, device):
# Test checks that @parametrize and @dtypes compose as expected when @parametrize
# doesn't set dtype.
device = self.device_type
class TestParametrized(TestCase):
@dtypes(torch.float32, torch.float64)
@parametrize("x", range(3))
def test_parametrized(self, x, dtype):
pass
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()['TestParametrized{}'.format(device.upper())]
expected_test_names = [name.format(device_cls.__name__, device) for name in (
'{}.test_parametrized_x_0_{}_float32',
'{}.test_parametrized_x_0_{}_float64',
'{}.test_parametrized_x_1_{}_float32',
'{}.test_parametrized_x_1_{}_float64',
'{}.test_parametrized_x_2_{}_float32',
'{}.test_parametrized_x_2_{}_float64')
]
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(sorted(expected_test_names), sorted(test_names))
|
def test_dtypes_composition_valid(self, device):
# Test checks that @parametrize and @dtypes compose as expected when @parametrize
# doesn't set dtype.
device = self.device_type
class TestParametrized(TestCase):
@dtypes(torch.float32, torch.float64)
@parametrize("x", range(3))
def test_parametrized(self, x, dtype):
pass
instantiate_device_type_tests(TestParametrized, locals(), only_for=device)
device_cls = locals()[f'TestParametrized{device.upper()}']
expected_test_names = [name.format(device_cls.__name__, device) for name in (
'{}.test_parametrized_x_0_{}_float32',
'{}.test_parametrized_x_0_{}_float64',
'{}.test_parametrized_x_1_{}_float32',
'{}.test_parametrized_x_1_{}_float64',
'{}.test_parametrized_x_2_{}_float32',
'{}.test_parametrized_x_2_{}_float64')
]
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(sorted(expected_test_names), sorted(test_names))
|
import collections
import doctest
import functools
import importlib
import inspect
import itertools
import math
import os
import re
import subprocess
import sys
import unittest.mock
from typing import Any, Callable, Iterator, List, Tuple, Generator
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import \
(IS_FBCODE, IS_MACOS, IS_SANDCASTLE, IS_WINDOWS, TestCase, run_tests, skipIfRocm, slowTest,
parametrize, subtest, instantiate_parametrized_tests, dtype_name, TEST_WITH_ROCM)
from torch.testing._internal.common_device_type import \
(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, dtypes,
get_device_type_test_bases, instantiate_device_type_tests, onlyCUDA, onlyNativeDeviceTypes,
deviceCountAtLeast, ops, expectedFailureMeta, OpDTypes)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal import opinfo
from torch.testing._internal.common_dtype import all_types_and_complex_and, floating_types
from torch.testing._internal.common_modules import modules, module_db, ModuleInfo
from torch.testing._internal.opinfo.core import SampleInput, DecorateInfo, OpInfo
class TestTestParametrizationDeviceType(TestCase):
|
import collections
import doctest
import functools
import importlib
import inspect
import itertools
import math
import os
import re
import subprocess
import sys
import unittest.mock
from typing import Any, Callable, Iterator, List, Tuple
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import \
(IS_FBCODE, IS_JETSON, IS_MACOS, IS_SANDCASTLE, IS_WINDOWS, TestCase, run_tests, slowTest,
parametrize, subtest, instantiate_parametrized_tests, dtype_name, TEST_WITH_ROCM, decorateIf)
from torch.testing._internal.common_device_type import \
(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, dtypes,
get_device_type_test_bases, instantiate_device_type_tests, onlyCPU, onlyCUDA, onlyNativeDeviceTypes,
deviceCountAtLeast, ops, expectedFailureMeta, OpDTypes)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal import opinfo
from torch.testing._internal.common_dtype import all_types_and_complex_and, floating_types
from torch.testing._internal.common_modules import modules, module_db, ModuleInfo
from torch.testing._internal.opinfo.core import SampleInput, DecorateInfo, OpInfo
import operator
class TestTestParametrizationDeviceType(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_testing.py
|
test_opinfo_error_generators
|
def test_opinfo_error_generators(self, device, op):
# Test that op.error_inputs doesn't generate multiple inputs when called
samples = op.error_inputs(device)
self.assertIsInstance(samples, Generator)
|
def test_opinfo_error_generators(self, device, op):
# Test that op.error_inputs doesn't generate multiple inputs when called
samples = op.error_inputs(device)
self.assertIsInstance(samples, Iterator)
|
import collections
import doctest
import functools
import importlib
import inspect
import itertools
import math
import os
import re
import subprocess
import sys
import unittest.mock
from typing import Any, Callable, Iterator, List, Tuple, Generator
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import \
(IS_FBCODE, IS_MACOS, IS_SANDCASTLE, IS_WINDOWS, TestCase, run_tests, skipIfRocm, slowTest,
parametrize, subtest, instantiate_parametrized_tests, dtype_name, TEST_WITH_ROCM)
from torch.testing._internal.common_device_type import \
(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, dtypes,
get_device_type_test_bases, instantiate_device_type_tests, onlyCUDA, onlyNativeDeviceTypes,
deviceCountAtLeast, ops, expectedFailureMeta, OpDTypes)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal import opinfo
from torch.testing._internal.common_dtype import all_types_and_complex_and, floating_types
from torch.testing._internal.common_modules import modules, module_db, ModuleInfo
from torch.testing._internal.opinfo.core import SampleInput, DecorateInfo, OpInfo
class TestOpInfoSampleFunctions(TestCase):
|
import collections
import doctest
import functools
import importlib
import inspect
import itertools
import math
import os
import re
import subprocess
import sys
import unittest.mock
from typing import Any, Callable, Iterator, List, Tuple
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import \
(IS_FBCODE, IS_JETSON, IS_MACOS, IS_SANDCASTLE, IS_WINDOWS, TestCase, run_tests, slowTest,
parametrize, subtest, instantiate_parametrized_tests, dtype_name, TEST_WITH_ROCM, decorateIf)
from torch.testing._internal.common_device_type import \
(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, dtypes,
get_device_type_test_bases, instantiate_device_type_tests, onlyCPU, onlyCUDA, onlyNativeDeviceTypes,
deviceCountAtLeast, ops, expectedFailureMeta, OpDTypes)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal import opinfo
from torch.testing._internal.common_dtype import all_types_and_complex_and, floating_types
from torch.testing._internal.common_modules import modules, module_db, ModuleInfo
from torch.testing._internal.opinfo.core import SampleInput, DecorateInfo, OpInfo
import operator
class TestOpInfoSampleFunctions(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
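The modification above relaxes the assertion from Generator to Iterator; a small standalone snippet (independent of the dataset rows) illustrating why Iterator is the more general check:
from collections.abc import Generator, Iterator

def gen():
    yield 1

g = gen()
# Every generator is also an iterator, so the old assertion implies the new one.
assert isinstance(g, Generator) and isinstance(g, Iterator)

# A plain list iterator is an Iterator but not a Generator, so the relaxed
# assertion also accepts lazily-yielding implementations that are not generators.
it = iter([1, 2])
assert isinstance(it, Iterator) and not isinstance(it, Generator)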
torch
|
test/test_torch.py
|
test_dtypetensor_warnings
|
def test_dtypetensor_warnings(self, device):
msg = 'The torch.cuda.*DtypeTensor constructors are no longer recommended'
with self.assertWarnsOnceRegex(UserWarning, msg):
t = torch.cuda.FloatTensor([0])
with self.assertWarnsOnceRegex(UserWarning, msg):
t = torch.cuda.DoubleTensor([0])
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
class TestTorchDeviceType(TestCase):
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/test_torch.py
|
test_set_default_tensor_type_warnings
|
# TODO: this test should be in test_nn.py
|
def test_set_default_tensor_type_warnings(self, device):
msg = '.*is deprecated as of PyTorch 2.1, please use torch.set_default_dtype().*'
default_type = torch.tensor([]).type()
try:
with self.assertWarnsOnceRegex(UserWarning, msg):
torch.set_default_tensor_type(torch.FloatTensor)
if torch.cuda.is_available():
with self.assertWarnsOnceRegex(UserWarning, msg):
torch.set_default_tensor_type(torch.cuda.FloatTensor)
finally:
torch.set_default_tensor_type(default_type)
# TODO: this test should be in test_nn.py
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
class TestTorchDeviceType(TestCase):
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
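The deprecation warnings checked above steer users toward torch.set_default_dtype; a minimal sketch of the recommended replacement (independent of the dataset rows):
import torch

# Instead of the deprecated torch.set_default_tensor_type(torch.FloatTensor):
torch.set_default_dtype(torch.float32)
x = torch.tensor([0.0])
assert x.dtype == torch.float32

# The device is chosen per tensor (or via torch.set_default_device) rather than
# through the deprecated torch.cuda.*Tensor type constructors.
if torch.cuda.is_available():
    y = torch.tensor([0.0], device="cuda")
    assert y.dtype == torch.float32 and y.is_cuda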
torch
|
test/test_torch.py
|
test_scalar_check
|
def test_scalar_check(self, device):
zero_d = torch.randn((), device=device)
one_d = torch.randn((1,), device=device)
# remainder
self.assertEqual((), torch.remainder(zero_d, zero_d).shape)
self.assertEqual((), torch.remainder(zero_d, 2).shape)
self.assertEqual((1,), torch.remainder(zero_d, one_d).shape)
self.assertEqual((1,), torch.remainder(one_d, zero_d).shape)
# fmod
self.assertEqual((), torch.fmod(zero_d, zero_d).shape)
self.assertEqual((), torch.fmod(zero_d, 2).shape)
self.assertEqual((1,), torch.fmod(zero_d, one_d).shape)
self.assertEqual((1,), torch.fmod(one_d, zero_d).shape)
# exp, cos, cosh, tan, atan, tanh, erf, erfc, reciprocal
self.assertEqual((), torch.exp(zero_d).shape)
self.assertEqual((), torch.cos(zero_d).shape)
self.assertEqual((), torch.cosh(zero_d).shape)
self.assertEqual((), torch.tan(zero_d).shape)
self.assertEqual((), torch.atan(zero_d).shape)
self.assertEqual((), torch.acosh(zero_d).shape)
self.assertEqual((), torch.asinh(zero_d).shape)
self.assertEqual((), torch.atanh(zero_d).shape)
self.assertEqual((), torch.tanh(zero_d).shape)
self.assertEqual((), torch.erf(zero_d).shape)
self.assertEqual((), torch.erfc(zero_d).shape)
self.assertEqual((), torch.reciprocal(zero_d).shape)
self.assertEqual((1,), torch.exp(one_d).shape)
self.assertEqual((1,), torch.cos(one_d).shape)
self.assertEqual((1,), torch.cosh(one_d).shape)
self.assertEqual((1,), torch.tan(one_d).shape)
self.assertEqual((1,), torch.atan(one_d).shape)
self.assertEqual((1,), torch.acosh(one_d).shape)
self.assertEqual((1,), torch.asinh(one_d).shape)
self.assertEqual((1,), torch.atanh(one_d).shape)
self.assertEqual((1,), torch.tanh(one_d).shape)
self.assertEqual((1,), torch.erf(one_d).shape)
self.assertEqual((1,), torch.erfc(one_d).shape)
self.assertEqual((1,), torch.reciprocal(one_d).shape)
# clamp
self.assertEqual((), torch.clamp(zero_d, min=0, max=1).shape)
self.assertEqual((), torch.clamp(zero_d, min=0).shape)
self.assertEqual((), torch.clamp(zero_d, max=1).shape)
self.assertEqual((1,), torch.clamp(one_d, min=0, max=1).shape)
self.assertEqual((1,), torch.clamp(one_d, min=0).shape)
self.assertEqual((1,), torch.clamp(one_d, max=1).shape)
# cumsum, cumprod, cummax, cummin
self.assertEqual((), torch.logcumsumexp(zero_d, 0).shape)
self.assertEqual((), torch.cumsum(zero_d, 0).shape)
self.assertEqual((), torch.cumprod(zero_d, 0).shape)
self.assertEqual((), torch.cummax(zero_d, 0)[0].shape)
self.assertEqual((), torch.cummin(zero_d, 0)[0].shape)
# sort, topk
self.assertEqual([(), ()], [x.shape for x in torch.sort(zero_d, 0, False)])
self.assertEqual([(), ()], [x.shape for x in torch.sort(zero_d, 0, True)])
self.assertEqual([(), ()], [x.shape for x in torch.topk(zero_d, 1, 0, False)])
self.assertEqual([(), ()], [x.shape for x in torch.topk(zero_d, 1, 0, True)])
# max, min
self.assertEqual((), torch.max(zero_d, zero_d).shape)
self.assertEqual((1,), torch.max(one_d, zero_d).shape)
self.assertEqual((1,), torch.max(zero_d, one_d).shape)
self.assertEqual((), torch.min(zero_d, zero_d).shape)
self.assertEqual((1,), torch.min(one_d, zero_d).shape)
self.assertEqual((1,), torch.min(zero_d, one_d).shape)
zero_d_int = torch.tensor(1, device=device)
one_d_int = torch.tensor([1], device=device)
# lshift, rshift
self.assertEqual((), (zero_d_int >> zero_d_int).shape)
self.assertEqual((), (zero_d_int >> 1).shape)
self.assertEqual((1,), (one_d_int >> zero_d_int).shape)
self.assertEqual((1,), (zero_d_int >> one_d_int).shape)
self.assertEqual((1,), (one_d_int >> 1).shape)
self.assertEqual((), (zero_d_int << zero_d_int).shape)
self.assertEqual((), (zero_d_int << 1).shape)
self.assertEqual((1,), (one_d_int << zero_d_int).shape)
self.assertEqual((1,), (zero_d_int << one_d_int).shape)
self.assertEqual((1,), (one_d_int << 1).shape)
# or
self.assertEqual((), (zero_d_int | zero_d_int).shape)
self.assertEqual((), (zero_d_int | 1).shape)
self.assertEqual((1,), (one_d_int | zero_d_int).shape)
self.assertEqual((1,), (zero_d_int | one_d_int).shape)
self.assertEqual((1,), (one_d_int | 1).shape)
# and
self.assertEqual((), (zero_d_int & zero_d_int).shape)
self.assertEqual((), (zero_d_int & 1).shape)
self.assertEqual((1,), (one_d_int & zero_d_int).shape)
self.assertEqual((1,), (zero_d_int & one_d_int).shape)
self.assertEqual((1,), (one_d_int & 1).shape)
# clone
self.assertEqual((), zero_d.clone().shape)
zero_d_bool = torch.tensor(True, device=device)
one_d_bool = torch.tensor([True], device=device)
# masked_select
self.assertEqual((1,), torch.masked_select(zero_d_bool, zero_d_bool).shape)
self.assertEqual((1,), torch.masked_select(zero_d_bool, one_d_bool).shape)
self.assertEqual((1,), torch.masked_select(one_d_bool, zero_d_bool).shape)
zero_d_uint8 = torch.tensor(1, dtype=torch.uint8, device=device)
one_d_uint8 = torch.tensor([1], dtype=torch.uint8, device=device)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.assertEqual((1,), torch.masked_select(zero_d_uint8, zero_d_uint8).shape)
self.assertEqual((1,), torch.masked_select(zero_d_uint8, one_d_uint8).shape)
self.assertEqual((1,), torch.masked_select(one_d_uint8, zero_d_uint8).shape)
# mode
self.assertEqual([(), ()], [x.shape for x in torch.mode(zero_d, dim=0, keepdim=True)])
self.assertEqual([(), ()], [x.shape for x in torch.mode(zero_d, dim=0, keepdim=False)])
self.assertEqual([(1,), (1,)], [x.shape for x in torch.mode(one_d, dim=0, keepdim=True)])
self.assertEqual([(), ()], [x.shape for x in torch.mode(one_d, dim=0, keepdim=False)])
# max
self.assertEqual([(), ()], [x.shape for x in torch.max(zero_d, dim=0, keepdim=True)])
self.assertEqual([(), ()], [x.shape for x in torch.max(zero_d, dim=0, keepdim=False)])
self.assertEqual([(1,), (1,)], [x.shape for x in torch.max(one_d, dim=0, keepdim=True)])
self.assertEqual([(), ()], [x.shape for x in torch.max(one_d, dim=0, keepdim=False)])
# amax
self.assertEqual((), torch.amax(zero_d, dim=0, keepdim=True).shape)
self.assertEqual((), torch.amax(zero_d, dim=0, keepdim=False).shape)
self.assertEqual((1,), torch.amax(one_d, dim=0, keepdim=True).shape)
self.assertEqual((), torch.amax(one_d, dim=0, keepdim=False).shape)
# min
self.assertEqual([(), ()], [x.shape for x in torch.min(zero_d, dim=0, keepdim=True)])
self.assertEqual([(), ()], [x.shape for x in torch.min(zero_d, dim=0, keepdim=False)])
self.assertEqual([(1,), (1,)], [x.shape for x in torch.min(one_d, dim=0, keepdim=True)])
self.assertEqual([(), ()], [x.shape for x in torch.min(one_d, dim=0, keepdim=False)])
# amin
self.assertEqual((), torch.amin(zero_d, dim=0, keepdim=True).shape)
self.assertEqual((), torch.amin(zero_d, dim=0, keepdim=False).shape)
self.assertEqual((1,), torch.amin(one_d, dim=0, keepdim=True).shape)
self.assertEqual((), torch.amin(one_d, dim=0, keepdim=False).shape)
# set_
zero_d_clone = zero_d.clone()
one_d_clone = one_d.clone()
self.assertEqual((), zero_d_clone.set_(one_d.storage(), 0, (), ()).shape)
self.assertEqual((1,), zero_d_clone.set_(one_d.storage(), 0, (1,), (1,)).shape)
self.assertEqual((), one_d_clone.set_(one_d.storage(), 0, (), ()).shape)
self.assertEqual((1,), one_d_clone.set_(one_d.storage(), 0, (1,), (1,)).shape)
self.assertEqual((), zero_d.clone().set_(zero_d).shape)
self.assertEqual((), one_d.clone().set_(zero_d).shape)
self.assertEqual((1,), zero_d.clone().set_(one_d).shape)
self.assertEqual((1,), one_d.clone().set_(one_d).shape)
# take
self.assertEqual((), torch.randn((2, 3), device=device).take(zero_d_int).shape)
self.assertEqual((1,), torch.randn((2, 3), device=device).take(one_d_int).shape)
# gather
self.assertEqual((), torch.gather(zero_d, 0, torch.zeros((), dtype=torch.int64, device=device)).shape)
self.assertEqual((1,), torch.gather(zero_d, 0, torch.zeros((1,), dtype=torch.int64, device=device)).shape)
self.assertEqual((), torch.gather(one_d, 0, torch.zeros((), dtype=torch.int64, device=device)).shape)
self.assertEqual((1,), torch.gather(one_d, 0, torch.zeros((1,), dtype=torch.int64, device=device)).shape)
# normal
# std must be >= 0
zero_d_ge_0 = torch.rand((), device=device)
# documentation says out shape matches shape of mean
self.assertEqual((), torch.normal(zero_d, zero_d_ge_0).shape)
self.assertEqual((1,), torch.normal(one_d, zero_d_ge_0).shape)
self.assertEqual((), torch.normal(1, zero_d_ge_0).shape)
self.assertEqual((), torch.normal(zero_d, 1).shape)
self.assertEqual((1,), torch.normal(one_d, 1).shape)
# TODO: this behavior differs on CPU and GPU, see https://github.com/pytorch/pytorch/issues/30480.
# self.assertEqual((), torch.normal(zero_d, one_d).shape)
# self.assertEqual((), torch.normal(1, one_d).shape)
# convolutions. Yes, we are testing nn.functional here; seems justified
# given it's similar to the other tests
w = torch.randn(2, 1, 3, 3, device=device).div_(2).requires_grad_()
self.assertRaises(RuntimeError, lambda: torch.nn.functional.conv2d(zero_d, w, groups=1))
self.assertRaises(RuntimeError, lambda: torch.nn.functional.conv2d(zero_d, w, groups=2))
# nll_loss -- verify input can't be 0-dimensional.
self.assertRaises(ValueError, lambda: torch.nn.functional.nll_loss(zero_d, zero_d, reduction='none'))
self.assertRaises(ValueError, lambda: torch.nn.functional.nll_loss(zero_d, one_d, reduction='none'))
# verify output is 0-dimensional when reduction != 'none'
for (input, target) in ((torch.randn(1, 1, device=device), torch.tensor([0], device=device)),
(torch.randn(1, 1, 1, 1, device=device), torch.tensor([[[0]]], device=device))):
self.assertEqual((), torch.nn.functional.nll_loss(input, target, reduction='mean').shape)
self.assertEqual((), torch.nn.functional.nll_loss(input, target, reduction='sum').shape)
# multilabel_margin_loss
for input in (zero_d, one_d, torch.randn(1, 1, device=device)):
for target in (torch.tensor(0, device=device), torch.tensor([0], device=device), torch.tensor([[0]], device=device)):
if (input.dim() <= 1 and target.dim() <= 1) or (input.dim() == 2 and target.dim() == 2):
output_shape = (target.shape[0],) if target.dim() == 2 else ()
self.assertEqual(output_shape,
torch.nn.functional.multilabel_margin_loss(input, target, reduction='none').shape)
self.assertEqual((), torch.nn.functional.multilabel_margin_loss(input, target, reduction='mean').shape)
self.assertEqual((), torch.nn.functional.multilabel_margin_loss(input, target, reduction='sum').shape)
else:
self.assertRaises(RuntimeError,
lambda: torch.nn.functional.multilabel_margin_loss(input, target, reduction='none'))
self.assertRaises(RuntimeError,
lambda: torch.nn.functional.multilabel_margin_loss(input, target, reduction='mean'))
self.assertRaises(RuntimeError,
lambda: torch.nn.functional.multilabel_margin_loss(input, target, reduction='sum'))
# multi_margin_loss
for input in (zero_d, one_d, torch.randn(1, 1, device=device)):
for target in (torch.tensor(0, device=device), torch.tensor([0], device=device)):
self.assertEqual(target.shape, torch.nn.functional.multi_margin_loss(input, target, reduction='none').shape)
self.assertEqual((), torch.nn.functional.multi_margin_loss(input, target, reduction='mean').shape)
self.assertEqual((), torch.nn.functional.multi_margin_loss(input, target, reduction='sum').shape)
# Test that `TORCH_CHECK_TENSOR_ALL` raises errors that propagate from C++ to Python
|
def test_scalar_check(self, device):
zero_d = torch.randn((), device=device)
one_d = torch.randn((1,), device=device)
# remainder
self.assertEqual((), torch.remainder(zero_d, zero_d).shape)
self.assertEqual((), torch.remainder(zero_d, 2).shape)
self.assertEqual((1,), torch.remainder(zero_d, one_d).shape)
self.assertEqual((1,), torch.remainder(one_d, zero_d).shape)
# fmod
self.assertEqual((), torch.fmod(zero_d, zero_d).shape)
self.assertEqual((), torch.fmod(zero_d, 2).shape)
self.assertEqual((1,), torch.fmod(zero_d, one_d).shape)
self.assertEqual((1,), torch.fmod(one_d, zero_d).shape)
# exp, cos, cosh, tan, atan, tanh, erf, erfc, reciprocal
self.assertEqual((), torch.exp(zero_d).shape)
self.assertEqual((), torch.cos(zero_d).shape)
self.assertEqual((), torch.cosh(zero_d).shape)
self.assertEqual((), torch.tan(zero_d).shape)
self.assertEqual((), torch.atan(zero_d).shape)
self.assertEqual((), torch.acosh(zero_d).shape)
self.assertEqual((), torch.asinh(zero_d).shape)
self.assertEqual((), torch.atanh(zero_d).shape)
self.assertEqual((), torch.tanh(zero_d).shape)
self.assertEqual((), torch.erf(zero_d).shape)
self.assertEqual((), torch.erfc(zero_d).shape)
self.assertEqual((), torch.reciprocal(zero_d).shape)
self.assertEqual((1,), torch.exp(one_d).shape)
self.assertEqual((1,), torch.cos(one_d).shape)
self.assertEqual((1,), torch.cosh(one_d).shape)
self.assertEqual((1,), torch.tan(one_d).shape)
self.assertEqual((1,), torch.atan(one_d).shape)
self.assertEqual((1,), torch.acosh(one_d).shape)
self.assertEqual((1,), torch.asinh(one_d).shape)
self.assertEqual((1,), torch.atanh(one_d).shape)
self.assertEqual((1,), torch.tanh(one_d).shape)
self.assertEqual((1,), torch.erf(one_d).shape)
self.assertEqual((1,), torch.erfc(one_d).shape)
self.assertEqual((1,), torch.reciprocal(one_d).shape)
# clamp
self.assertEqual((), torch.clamp(zero_d, min=0, max=1).shape)
self.assertEqual((), torch.clamp(zero_d, min=0).shape)
self.assertEqual((), torch.clamp(zero_d, max=1).shape)
self.assertEqual((1,), torch.clamp(one_d, min=0, max=1).shape)
self.assertEqual((1,), torch.clamp(one_d, min=0).shape)
self.assertEqual((1,), torch.clamp(one_d, max=1).shape)
# cumsum, cumprod, cummax, cummin
self.assertEqual((), torch.logcumsumexp(zero_d, 0).shape)
self.assertEqual((), torch.cumsum(zero_d, 0).shape)
self.assertEqual((), torch.cumprod(zero_d, 0).shape)
self.assertEqual((), torch.cummax(zero_d, 0)[0].shape)
self.assertEqual((), torch.cummin(zero_d, 0)[0].shape)
# sort, topk
self.assertEqual([(), ()], [x.shape for x in torch.sort(zero_d, 0, False)])
self.assertEqual([(), ()], [x.shape for x in torch.sort(zero_d, 0, True)])
self.assertEqual([(), ()], [x.shape for x in torch.topk(zero_d, 1, 0, False)])
self.assertEqual([(), ()], [x.shape for x in torch.topk(zero_d, 1, 0, True)])
# max, min
self.assertEqual((), torch.max(zero_d, zero_d).shape)
self.assertEqual((1,), torch.max(one_d, zero_d).shape)
self.assertEqual((1,), torch.max(zero_d, one_d).shape)
self.assertEqual((), torch.min(zero_d, zero_d).shape)
self.assertEqual((1,), torch.min(one_d, zero_d).shape)
self.assertEqual((1,), torch.min(zero_d, one_d).shape)
zero_d_int = torch.tensor(1, device=device)
one_d_int = torch.tensor([1], device=device)
# lshift, rshift
self.assertEqual((), (zero_d_int >> zero_d_int).shape)
self.assertEqual((), (zero_d_int >> 1).shape)
self.assertEqual((1,), (one_d_int >> zero_d_int).shape)
self.assertEqual((1,), (zero_d_int >> one_d_int).shape)
self.assertEqual((1,), (one_d_int >> 1).shape)
self.assertEqual((), (zero_d_int << zero_d_int).shape)
self.assertEqual((), (zero_d_int << 1).shape)
self.assertEqual((1,), (one_d_int << zero_d_int).shape)
self.assertEqual((1,), (zero_d_int << one_d_int).shape)
self.assertEqual((1,), (one_d_int << 1).shape)
# or
self.assertEqual((), (zero_d_int | zero_d_int).shape)
self.assertEqual((), (zero_d_int | 1).shape)
self.assertEqual((1,), (one_d_int | zero_d_int).shape)
self.assertEqual((1,), (zero_d_int | one_d_int).shape)
self.assertEqual((1,), (one_d_int | 1).shape)
# and
self.assertEqual((), (zero_d_int & zero_d_int).shape)
self.assertEqual((), (zero_d_int & 1).shape)
self.assertEqual((1,), (one_d_int & zero_d_int).shape)
self.assertEqual((1,), (zero_d_int & one_d_int).shape)
self.assertEqual((1,), (one_d_int & 1).shape)
# clone
self.assertEqual((), zero_d.clone().shape)
zero_d_bool = torch.tensor(True, device=device)
one_d_bool = torch.tensor([True], device=device)
# masked_select
self.assertEqual((1,), torch.masked_select(zero_d_bool, zero_d_bool).shape)
self.assertEqual((1,), torch.masked_select(zero_d_bool, one_d_bool).shape)
self.assertEqual((1,), torch.masked_select(one_d_bool, zero_d_bool).shape)
zero_d_uint8 = torch.tensor(1, dtype=torch.uint8, device=device)
one_d_uint8 = torch.tensor([1], dtype=torch.uint8, device=device)
# mode
self.assertEqual([(), ()], [x.shape for x in torch.mode(zero_d, dim=0, keepdim=True)])
self.assertEqual([(), ()], [x.shape for x in torch.mode(zero_d, dim=0, keepdim=False)])
self.assertEqual([(1,), (1,)], [x.shape for x in torch.mode(one_d, dim=0, keepdim=True)])
self.assertEqual([(), ()], [x.shape for x in torch.mode(one_d, dim=0, keepdim=False)])
# max
self.assertEqual([(), ()], [x.shape for x in torch.max(zero_d, dim=0, keepdim=True)])
self.assertEqual([(), ()], [x.shape for x in torch.max(zero_d, dim=0, keepdim=False)])
self.assertEqual([(1,), (1,)], [x.shape for x in torch.max(one_d, dim=0, keepdim=True)])
self.assertEqual([(), ()], [x.shape for x in torch.max(one_d, dim=0, keepdim=False)])
# amax
self.assertEqual((), torch.amax(zero_d, dim=0, keepdim=True).shape)
self.assertEqual((), torch.amax(zero_d, dim=0, keepdim=False).shape)
self.assertEqual((1,), torch.amax(one_d, dim=0, keepdim=True).shape)
self.assertEqual((), torch.amax(one_d, dim=0, keepdim=False).shape)
# min
self.assertEqual([(), ()], [x.shape for x in torch.min(zero_d, dim=0, keepdim=True)])
self.assertEqual([(), ()], [x.shape for x in torch.min(zero_d, dim=0, keepdim=False)])
self.assertEqual([(1,), (1,)], [x.shape for x in torch.min(one_d, dim=0, keepdim=True)])
self.assertEqual([(), ()], [x.shape for x in torch.min(one_d, dim=0, keepdim=False)])
# amin
self.assertEqual((), torch.amin(zero_d, dim=0, keepdim=True).shape)
self.assertEqual((), torch.amin(zero_d, dim=0, keepdim=False).shape)
self.assertEqual((1,), torch.amin(one_d, dim=0, keepdim=True).shape)
self.assertEqual((), torch.amin(one_d, dim=0, keepdim=False).shape)
# set_
zero_d_clone = zero_d.clone()
one_d_clone = one_d.clone()
self.assertEqual((), zero_d_clone.set_(one_d.storage(), 0, (), ()).shape)
self.assertEqual((1,), zero_d_clone.set_(one_d.storage(), 0, (1,), (1,)).shape)
self.assertEqual((), one_d_clone.set_(one_d.storage(), 0, (), ()).shape)
self.assertEqual((1,), one_d_clone.set_(one_d.storage(), 0, (1,), (1,)).shape)
self.assertEqual((), zero_d.clone().set_(zero_d).shape)
self.assertEqual((), one_d.clone().set_(zero_d).shape)
self.assertEqual((1,), zero_d.clone().set_(one_d).shape)
self.assertEqual((1,), one_d.clone().set_(one_d).shape)
# take
self.assertEqual((), torch.randn((2, 3), device=device).take(zero_d_int).shape)
self.assertEqual((1,), torch.randn((2, 3), device=device).take(one_d_int).shape)
# gather
self.assertEqual((), torch.gather(zero_d, 0, torch.zeros((), dtype=torch.int64, device=device)).shape)
self.assertEqual((1,), torch.gather(zero_d, 0, torch.zeros((1,), dtype=torch.int64, device=device)).shape)
self.assertEqual((), torch.gather(one_d, 0, torch.zeros((), dtype=torch.int64, device=device)).shape)
self.assertEqual((1,), torch.gather(one_d, 0, torch.zeros((1,), dtype=torch.int64, device=device)).shape)
# normal
# std must be >= 0
zero_d_ge_0 = torch.rand((), device=device)
# documentation says out shape matches shape of mean
self.assertEqual((), torch.normal(zero_d, zero_d_ge_0).shape)
self.assertEqual((1,), torch.normal(one_d, zero_d_ge_0).shape)
self.assertEqual((), torch.normal(1, zero_d_ge_0).shape)
self.assertEqual((), torch.normal(zero_d, 1).shape)
self.assertEqual((1,), torch.normal(one_d, 1).shape)
# TODO: this behavior differs on CPU and GPU, see https://github.com/pytorch/pytorch/issues/30480.
# self.assertEqual((), torch.normal(zero_d, one_d).shape)
# self.assertEqual((), torch.normal(1, one_d).shape)
# convolutions. Yes, we are testing nn.functional here; seems justified
# given it's similar to the other tests
w = torch.randn(2, 1, 3, 3, device=device).div_(2).requires_grad_()
self.assertRaises(RuntimeError, lambda: torch.nn.functional.conv2d(zero_d, w, groups=1))
self.assertRaises(RuntimeError, lambda: torch.nn.functional.conv2d(zero_d, w, groups=2))
# nll_loss -- verify input can't be 0-dimensional.
self.assertRaises(ValueError, lambda: torch.nn.functional.nll_loss(zero_d, zero_d, reduction='none'))
self.assertRaises(ValueError, lambda: torch.nn.functional.nll_loss(zero_d, one_d, reduction='none'))
# verify output is 0-dimensional when reduction != 'none'
for (input, target) in ((torch.randn(1, 1, device=device), torch.tensor([0], device=device)),
(torch.randn(1, 1, 1, 1, device=device), torch.tensor([[[0]]], device=device))):
self.assertEqual((), torch.nn.functional.nll_loss(input, target, reduction='mean').shape)
self.assertEqual((), torch.nn.functional.nll_loss(input, target, reduction='sum').shape)
# Test that `torch._check_tensor_all` raises errors in the correct cases
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
from torch import inf, nan
from itertools import product, combinations, permutations
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_utils import (
TEST_WITH_TORCHINDUCTOR, TestCase, TEST_WITH_ROCM, run_tests,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, load_tests, skipIfTorchInductor, slowTest,
TEST_WITH_CROSSREF, skipIfTorchDynamo,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
skipIfNotRegistered, bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta,
PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN)
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types,
)
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
class TestTorchDeviceType(TestCase):
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
import weakref
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
class TestTorchDeviceType(TestCase):
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
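A quick standalone illustration of the broadcasting rule the shape checks above exercise (a minimal sketch with plain CPU tensors, outside the test harness): binary ops between a 0-d and a 1-d tensor produce a 1-d result, while dim-reductions of a 0-d tensor stay 0-d.

import torch

zero_d = torch.rand(())      # 0-dimensional (scalar) tensor
one_d = torch.rand(1)        # 1-dimensional tensor with one element

assert torch.max(zero_d, zero_d).shape == ()     # 0-d with 0-d stays 0-d
assert torch.max(zero_d, one_d).shape == (1,)    # 0-d broadcasts against 1-d

assert torch.cumsum(zero_d, 0).shape == ()       # reducing dim 0 of a 0-d tensor stays 0-d
values, indices = torch.sort(zero_d, 0)
assert values.shape == () and indices.shape == ()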
torch
|
test/test_torch.py
|
test_check_tensor_all
|
def test_check_tensor_all(self, device):
default_message = 'Expected cond to be True'
check_fn = torch._check_tensor_all
expected_error = RuntimeError
# cond must be a tensor
with self.assertRaisesRegex(TypeError, 'cond must be a tensor'):
check_fn(True)
# cond tensor must be boolean
with self.assertRaisesRegex(TypeError, 'cond tensor must have dtype torch.bool'):
check_fn(torch.ones(1, device=device))
test_sizes = [
(),
(1,),
(10,),
(1, 1),
(1, 10),
(10, 1),
(10, 10),
(1, 1, 1),
(10, 1, 1),
(1, 10, 1),
(10, 10, 10),
]
for size in test_sizes:
t_all_true = torch.ones(size, dtype=torch.bool, device=device)
t_all_false = torch.zeros(size, dtype=torch.bool, device=device)
# Should not raise error
check_fn(t_all_true)
with self.assertRaisesRegex(expected_error, default_message):
check_fn(t_all_false)
if t_all_true.numel() > 1:
t_all_true_but_one = t_all_true.clone()
# Choose a random element to set to false
idx = (random.choice(range(dim_size)) for dim_size in size)
t_all_true_but_one[(..., *idx)] = False
with self.assertRaisesRegex(expected_error, default_message):
check_fn(t_all_true_but_one)
# Test a simple failure message
message = 'message'
with self.assertRaisesRegex(expected_error, message):
check_fn(t_all_false, lambda: message)
# Test message with tensor
def message():
return torch.arange(4)
with self.assertRaisesRegex(expected_error, re.escape(str(message()))):
check_fn(t_all_false, message)
# Test format string message
def message():
return f"{'test'} {[1, 2, 'a', True]} {True} {100} {torch.arange(4)}"
with self.assertRaisesRegex(expected_error, re.escape(str(message()))):
check_fn(t_all_false, message)
# Test that `TORCH_CHECK_TENSOR_ALL` raises errors that propagate from C++ to Python
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
class TestTorchDeviceType(TestCase):
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
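For orientation, a hedged standalone sketch of the behavior the added test_check_tensor_all asserts; torch._check_tensor_all is a private helper, so this illustrates the tested contract rather than a supported public API.

import torch

cond = torch.tensor([True, True, True])
torch._check_tensor_all(cond)        # all elements True: returns silently

cond[1] = False
try:
    torch._check_tensor_all(cond, lambda: "found a False element")
except RuntimeError as err:
    print(err)                       # message includes "found a False element"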
torch
|
test/test_torch.py
|
_cond_fn
|
def _cond_fn(x):
if x: # taking boolean value of a tensor synchronizes
return x
else:
return 2 * x
# prepare inputs for subsequent ops
size = 4
x = torch.rand(size, device=device)
y = torch.rand((), device=device)
ind = torch.randint(size, (3,), device=device)
ind_cpu = ind.cpu()
repeats = torch.full((1,), 2, device=device)
mask = torch.randint(2, (size,), device=device, dtype=bool)
expect_no_sync = (lambda: _ind_put_fn(x, mask, 1.),
lambda: _ind_put_fn(x, ind, y),
lambda: _ind_get_fn(x, ind),
lambda: torch.nn.functional.one_hot(ind, num_classes=size),
lambda: torch.randperm(20000, device=device),
lambda: torch.repeat_interleave(x, 2, output_size=2 * size),
lambda: torch.repeat_interleave(x, repeats, output_size=2 * size),
lambda: torch.any(y))
expect_sync = (lambda: _ind_put_fn(x, mask, y),
lambda: _ind_put_fn(x, ind_cpu, y),
lambda: _ind_get_fn(x, mask),
lambda: _ind_get_fn(x, ind_cpu),
lambda: x.nonzero(),
lambda: _cond_fn(y),
lambda: torch.nn.functional.one_hot(ind),
lambda: torch.repeat_interleave(x, 2),
lambda: torch.repeat_interleave(x, repeats))
for f, level in product(expect_no_sync, (1, 2)):
_no_sync_helper(f, level)
for f, level in product(expect_sync, (1, 2)):
_sync_raises_helper(f, level)
|
def _cond_fn(x):
if x: # taking boolean value of a tensor synchronizes
return x
else:
return 2 * x
# prepare inputs for subsequent ops
size = 4
x = torch.rand(size, device=device)
y = torch.rand((), device=device)
ind = torch.randint(size, (3,), device=device)
ind_cpu = ind.cpu()
repeats = torch.full((1,), 2, device=device)
mask = torch.randint(2, (size,), device=device, dtype=bool)
expect_no_sync = (lambda: _ind_put_fn(x, mask, 1.),
lambda: _ind_put_fn(x, ind, y),
lambda: _ind_get_fn(x, ind),
lambda: torch.nn.functional.one_hot(ind, num_classes=size),
lambda: torch.randperm(20000, device=device),
lambda: torch.repeat_interleave(x, 2, output_size=2 * size),
lambda: torch.repeat_interleave(x, repeats, output_size=2 * size),
lambda: torch.any(y))
expect_sync = (lambda: _ind_put_fn(x, mask, y),
lambda: _ind_put_fn(x, ind_cpu, y),
lambda: _ind_get_fn(x, mask),
lambda: _ind_get_fn(x, ind_cpu),
lambda: x.nonzero(),
lambda: _cond_fn(y),
lambda: torch.nn.functional.one_hot(ind),
lambda: torch.repeat_interleave(x, repeats))
for f, level in product(expect_no_sync, (1, 2)):
_no_sync_helper(f, level)
for f, level in product(expect_sync, (1, 2)):
_sync_raises_helper(f, level)
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
from torch import inf, nan
from itertools import product, combinations, permutations
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_utils import (
TEST_WITH_TORCHINDUCTOR, TestCase, TEST_WITH_ROCM, run_tests,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, load_tests, skipIfTorchInductor, slowTest,
TEST_WITH_CROSSREF, skipIfTorchDynamo,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
skipIfNotRegistered, bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta,
PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN)
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types,
)
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
import weakref
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
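The _no_sync_helper/_sync_raises_helper used above are not shown in this record. Assuming they wrap torch.cuda.set_sync_debug_mode (1 = warn, 2 = error), a rough sketch of the kind of check being made; this is a guess at the harness, not code taken from it.

import torch

if torch.cuda.is_available():
    x = torch.rand(4, device="cuda")
    ind = torch.randint(4, (3,), device="cuda")
    torch.cuda.set_sync_debug_mode(2)                    # raise on implicit device synchronization
    try:
        torch.nn.functional.one_hot(ind, num_classes=4)  # listed under expect_no_sync above
        try:
            x.nonzero()                                  # listed under expect_sync above
        except RuntimeError as err:
            print("synchronizing op detected:", err)
    finally:
        torch.cuda.set_sync_debug_mode(0)                # restore the default mode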
torch
|
test/test_torch.py
|
test_gradient_spacing_list_length_error
|
def test_gradient_spacing_list_length_error(self, device, dtype):
t = make_tensor((2, 2), device=device, dtype=dtype)
spacing = (make_tensor((2,), device=device, dtype=dtype),)
with self.assertRaisesRegex(RuntimeError, r'expected spacing to be'):
torch.gradient(t, spacing=spacing)
spacing = (make_tensor((2,), device=device, dtype=dtype),) * 2
torch.gradient(t, spacing=spacing)
spacing = (make_tensor((2,), device=device, dtype=dtype),) * 3
with self.assertRaisesRegex(RuntimeError, r'expected spacing to be'):
torch.gradient(t, spacing=spacing)
spacing = (2,)
with self.assertRaisesRegex(RuntimeError, r'expected spacing to be'):
torch.gradient(t, spacing=spacing)
spacing = (2, 2)
torch.gradient(t, spacing=spacing)
spacing = (2, 2, 2)
with self.assertRaisesRegex(RuntimeError, r'expected spacing to be'):
torch.gradient(t, spacing=spacing)
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
class TestTorchDeviceType(TestCase):
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
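The added test above pins down torch.gradient's spacing validation: one spacing entry per differentiated dimension. A minimal sketch of the same rule:

import torch

t = torch.rand(2, 2)

dy, dx = torch.gradient(t, spacing=(2, 2))   # one spacing value per dimension: accepted

try:
    torch.gradient(t, spacing=(2,))          # too few spacing values for a 2-D tensor
except RuntimeError as err:
    print(err)                               # "expected spacing to be ..."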
torch
|
test/test_torch.py
|
test_masked_select
|
def test_masked_select(self, device, dtype):
if device == 'cpu':
warn = 'masked_select received a mask with dtype torch.uint8,'
else:
warn = 'indexing with dtype torch.uint8 is now deprecated, pl'
for maskType in [torch.uint8, torch.bool]:
num_src = 10
src = torch.tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=dtype, device=device)
mask = torch.randint(2, (num_src,), device=device, dtype=maskType)
with warnings.catch_warnings(record=True) as w:
dst = src.masked_select(mask)
if maskType is torch.uint8:
self.assertEqual(len(w), 1)
self.assertEqual(str(w[0].message)[0:53], str(warn))
dst2 = []
for i in range(num_src):
if mask[i]:
dst2 += [src[i]]
self.assertEqual(dst, torch.tensor(dst2), atol=0, rtol=0)
dst3 = torch.empty(0, device=device, dtype=dtype)
torch.masked_select(src, mask, out=dst3)
self.assertEqual(dst3, torch.tensor(dst2, dtype=dst3.dtype), atol=0, rtol=0)
# Since half on CPU is not supported, need to skip the remaining test cases
if dtype == torch.half and torch.device(device).type == 'cpu':
return
# Ensure that masks are expanded to match tensor properly
a = torch.rand(100, 100, device=device).mul(100).to(dtype)
mask_first_el_each_row = torch.zeros(100, device=device, dtype=torch.bool)
mask_first_el_each_row[0] = True
a_masked = a.masked_select(mask_first_el_each_row)
self.assertEqual(a_masked, a[:, 0])
mask_first_row = torch.zeros(100, 1, device=device, dtype=torch.bool)
mask_first_row[0][0] = True
a_masked = a.masked_select(mask_first_row)
self.assertEqual(a_masked, a[0, :])
# Ensure that tensor is expanded to match mask properly
a = torch.rand(100, device=device).mul(100).to(dtype)
mask_copy_3_times = torch.tensor([[True], [True], [False], [True]], device=device)
a_masked = a.masked_select(mask_copy_3_times)
self.assertEqual(a_masked, a.unsqueeze(0).expand(3, 100).flatten())
# FIXME: find a test suite for the masked select operator
|
def test_masked_select(self, device, dtype):
if device == 'cpu':
warn = 'masked_select received a mask with dtype torch.uint8,'
else:
warn = 'indexing with dtype torch.uint8 is now deprecated, pl'
for maskType in integral_types_and(torch.bool):
num_src = 10
src = torch.tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=dtype, device=device)
mask = torch.randint(2, (num_src,), device=device, dtype=maskType)
if maskType is not torch.bool:
with self.assertRaisesRegex(RuntimeError, r'expected BoolTensor for mask'):
dst = src.masked_select(mask)
continue
else:
dst = src.masked_select(mask)
dst2 = []
for i in range(num_src):
if mask[i]:
dst2 += [src[i]]
self.assertEqual(dst, torch.tensor(dst2), atol=0, rtol=0)
dst3 = torch.empty(0, device=device, dtype=dtype)
torch.masked_select(src, mask, out=dst3)
self.assertEqual(dst3, torch.tensor(dst2, dtype=dst3.dtype), atol=0, rtol=0)
# Since half on CPU is not supported, need to skip the remaining test cases
if dtype == torch.half and torch.device(device).type == 'cpu':
return
# Ensure that masks are expanded to match tensor properly
a = torch.rand(100, 100, device=device).mul(100).to(dtype)
mask_first_el_each_row = torch.zeros(100, device=device, dtype=torch.bool)
mask_first_el_each_row[0] = True
a_masked = a.masked_select(mask_first_el_each_row)
self.assertEqual(a_masked, a[:, 0])
mask_first_row = torch.zeros(100, 1, device=device, dtype=torch.bool)
mask_first_row[0][0] = True
a_masked = a.masked_select(mask_first_row)
self.assertEqual(a_masked, a[0, :])
# Ensure that tensor is expanded to match mask properly
a = torch.rand(100, device=device).mul(100).to(dtype)
mask_copy_3_times = torch.tensor([[True], [True], [False], [True]], device=device)
a_masked = a.masked_select(mask_copy_3_times)
self.assertEqual(a_masked, a.unsqueeze(0).expand(3, 100).flatten())
# FIXME: find a test suite for the masked select operator
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
from torch import inf, nan
from itertools import product, combinations, permutations
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_utils import (
TEST_WITH_TORCHINDUCTOR, TestCase, TEST_WITH_ROCM, run_tests,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, load_tests, skipIfTorchInductor, slowTest,
TEST_WITH_CROSSREF, skipIfTorchDynamo,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
skipIfNotRegistered, bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta,
PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN)
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types,
)
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
class TestTorchDeviceType(TestCase):
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
import weakref
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
class TestTorchDeviceType(TestCase):
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
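A short sketch of what the updated test_masked_select encodes: masked_select takes a boolean mask, and (per the new assertion) an integer mask such as uint8 is rejected with a RuntimeError instead of the old deprecation warning.

import torch

src = torch.arange(10.)
mask = src > 4                                # boolean mask
print(src.masked_select(mask))                # tensor([5., 6., 7., 8., 9.])

try:
    src.masked_select(mask.to(torch.uint8))   # non-bool mask
except RuntimeError as err:
    print(err)                                # "expected BoolTensor for mask"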
torch
|
test/test_torch.py
|
test_masked_fill
|
def test_masked_fill(self, device, dtypes):
dtype = dtypes[0]
mask_dtype = dtypes[1]
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
num_dest = 10
dst = torch.zeros(num_dest, dtype=dtype)
mask = torch.randint(2, (num_dest,), dtype=mask_dtype)
val = random.random()
dst2 = dst.clone()
dst.masked_fill_(mask, val)
for i in range(num_dest):
if mask[i]:
dst2[i] = val
self.assertEqual(dst, dst2, atol=0, rtol=0)
# test non-contiguous case
dst = ((torch.randn(num_dest, num_dest, num_dest) * 10).to(dtype)).permute((2, 0, 1))
dst2 = dst.contiguous()
if dtype.is_complex:
mask = dst.abs() > 0
else:
mask = dst > 0
self.assertTrue(not dst.is_contiguous())
self.assertTrue(dst2.is_contiguous())
dst.masked_fill_(mask.to(mask_dtype), val)
dst2.masked_fill_(mask.to(mask_dtype), val)
self.assertEqual(dst, dst2, atol=0, rtol=0)
if mask_dtype == torch.uint8:
self.assertEqual(len(w), 3)
warn = 'masked_fill_ received a mask with dtype torch.uint8,'
for wi in w:
self.assertEqual(str(wi.message)[0:52], str(warn))
else:
self.assertEqual(len(w), 0)
# FIXME: find a test suite for the masked fill operator
|
def test_masked_fill(self, device, dtypes):
dtype = dtypes[0]
mask_dtype = dtypes[1]
num_dest = 10
dst = torch.zeros(num_dest, dtype=dtype)
mask = torch.randint(2, (num_dest,), dtype=mask_dtype)
val = random.random()
dst2 = dst.clone()
if mask_dtype is not torch.bool:
with self.assertRaisesRegex(RuntimeError, 'only supports boolean masks'):
dst.masked_fill_(mask, val)
return
dst.masked_fill_(mask, val)
for i in range(num_dest):
if mask[i]:
dst2[i] = val
self.assertEqual(dst, dst2, atol=0, rtol=0)
# test non-contiguous case
dst = ((torch.randn(num_dest, num_dest, num_dest) * 10).to(dtype)).permute((2, 0, 1))
dst2 = dst.contiguous()
if dtype.is_complex:
mask = dst.abs() > 0
else:
mask = dst > 0
self.assertTrue(not dst.is_contiguous())
self.assertTrue(dst2.is_contiguous())
dst.masked_fill_(mask.to(mask_dtype), val)
dst2.masked_fill_(mask.to(mask_dtype), val)
self.assertEqual(dst, dst2, atol=0, rtol=0)
# FIXME: find a test suite for the masked fill operator
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
from torch import inf, nan
from itertools import product, combinations, permutations
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_utils import (
TEST_WITH_TORCHINDUCTOR, TestCase, TEST_WITH_ROCM, run_tests,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, load_tests, skipIfTorchInductor, slowTest,
TEST_WITH_CROSSREF, skipIfTorchDynamo,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
skipIfNotRegistered, bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta,
PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN)
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types,
)
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
class TestTorchDeviceType(TestCase):
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
import weakref
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
class TestTorchDeviceType(TestCase):
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
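The masked_fill change mirrors it: per the revised test, masked_fill_ accepts only boolean masks and raises on anything else. A minimal sketch:

import torch

dst = torch.zeros(5)
mask = torch.tensor([True, False, True, False, False])
dst.masked_fill_(mask, 3.0)
print(dst)                                    # tensor([3., 0., 3., 0., 0.])

try:
    dst.masked_fill_(mask.to(torch.uint8), 1.0)
except RuntimeError as err:
    print(err)                                # "only supports boolean masks"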
torch
|
test/test_torch.py
|
step
|
def step(self, closure=None, *, grad_scaler=None):
self.tester.assertTrue(isinstance(grad_scaler, torch.amp.GradScaler))
self.tester.assertFalse(hasattr(self, "grad_scale"))
self.tester.assertFalse(hasattr(self, "found_inf"))
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
class Optimizer1(_PlaceHolderOptimizer):
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_torch.py
|
step
|
def step(self, closure=None, *, grad_scaler=None):
self.tester.assertTrue(isinstance(grad_scaler, torch.amp.GradScaler))
self.tester.assertFalse(hasattr(self, "grad_scale"))
self.tester.assertFalse(hasattr(self, "found_inf"))
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
class Optimizer1(_PlaceHolderOptimizer):
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_torch.py
|
test_grad_scaling_state_dict
|
def test_grad_scaling_state_dict(self, device):
device = torch.device(device)
GradScaler = partial(torch.GradScaler, device=device.type)
for lazy_init_scale in True, False:
s0 = GradScaler(init_scale=3., growth_factor=4., backoff_factor=.5, growth_interval=2)
s1 = GradScaler(init_scale=6., growth_factor=7., backoff_factor=.8, growth_interval=1)
# sets a random value for load_state_dict to overwrite
s1._init_growth_tracker = 7
if lazy_init_scale:
# Dummy scale() call to ensure the scale tensor is lazily initialized.
s1.scale(torch.full((1,), 4.0, dtype=torch.float32, device=device))
if "cuda" == device.type:
self.assertTrue(isinstance(s1._scale, torch.cuda.FloatTensor))
else:
self.assertTrue(isinstance(s1._scale, torch.FloatTensor))
s1.load_state_dict(s0.state_dict())
self.assertEqual(s1.get_scale(), 3.)
self.assertEqual(s1.get_growth_factor(), 4.)
self.assertEqual(s1.get_backoff_factor(), .5)
self.assertEqual(s1.get_growth_interval(), 2)
self.assertEqual(s1._init_growth_tracker, 0)
# _run_scaling_case generalizes some single-optimizer test logic to avoid too much copy-pasting below.
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
class TestTorchDeviceType(TestCase):
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
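The added test drives the scaler through torch.GradScaler; a rough standalone equivalent, assuming a recent PyTorch where torch.amp.GradScaler accepts a device string (an assumption, not taken from this record), might look like:

import torch

src = torch.amp.GradScaler("cpu", init_scale=3., growth_factor=4.,
                           backoff_factor=.5, growth_interval=2)
dst = torch.amp.GradScaler("cpu")

dst.load_state_dict(src.state_dict())          # copies scale and growth/backoff settings
assert dst.get_scale() == 3.
assert dst.get_growth_factor() == 4.
assert dst.get_backoff_factor() == .5
assert dst.get_growth_interval() == 2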
torch
|
test/test_torch.py
|
test_check
|
def test_check(self):
test_cases = [
# check function, expected error
(torch._check, RuntimeError),
(torch._check_index, IndexError),
(torch._check_value, ValueError),
(torch._check_type, TypeError),
(torch._check_not_implemented, NotImplementedError),
]
for check_fn, expected_error in test_cases:
# cond=True should not raise an error
check_fn(True)
# Test default failure message for cond=False
default_message = 'Expected cond to be True'
with self.assertRaisesRegex(expected_error, default_message):
check_fn(False)
# Test a simple failure message
message = 'message'
with self.assertRaisesRegex(expected_error, message):
check_fn(False, lambda: message)
# Test message with tensor
def message():
return torch.arange(4)
with self.assertRaisesRegex(expected_error, re.escape(str(message()))):
check_fn(False, message)
# Test format string message
def message():
return f"{'test'} {[1, 2, 'a', True]} {True} {100} {torch.arange(4)}"
with self.assertRaisesRegex(expected_error, re.escape(str(message()))):
check_fn(False, message)
# Test incorrect `cond` arg type
with self.assertRaisesRegex(TypeError, 'cond must be a bool'):
check_fn('wrong type')
with self.assertRaisesRegex(TypeError, 'cond must be a bool'):
check_fn(torch.tensor(True))
# FIXME: move to indexing test suite
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
class TestTorch(TestCase):
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
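A minimal, hypothetical sketch of the torch._check family exercised by the test_check record above: cond must be a plain Python bool, and the optional message is a callable that is only evaluated on failure. The helper name positive_length and its message text are illustrative, not taken from the test.

import torch

def positive_length(n: int) -> torch.Tensor:
    # torch._check_value raises ValueError when the condition is False;
    # the lambda message is evaluated lazily, only if the check fails.
    torch._check_value(n > 0, lambda: f"expected a positive length, got {n}")
    return torch.arange(n)

print(positive_length(4))  # tensor([0, 1, 2, 3])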
torch
|
test/test_torch.py
|
test_pickle_gradscaler
|
def test_pickle_gradscaler(self, device):
# This test is not in test_cuda.py because it should pass in 3 cases:
# 1. cuda is not available.
# 2. cuda is available but device is not cuda.
# 3. cuda is available and device is cuda.
# In case 1, a and b disable themselves on construction and shouldn't try to pickle workhorse attributes.
# In case 2, a and b are enabled. Workhorse attributes participate in pickling, but none are lazy-inited
# to cuda Tensors, because I don't want to do cuda things if device is not cuda.
# In case 3, a and b are enabled and we may also try lazy-initing _scale to a cuda tensor.
device = torch.device(device)
try_lazy_inits = (True, False) if device.type == "cuda" else (False,)
for lazy_init_scale in try_lazy_inits:
a = torch.cuda.amp.GradScaler(init_scale=3., growth_factor=4., backoff_factor=.5, growth_interval=2)
self.assertTrue(not a.is_enabled() if torch.cuda.amp.common.amp_definitely_not_available() else a.is_enabled())
if lazy_init_scale:
# Dummy a.scale() call lazy-inits a._scale Tensor.
a.scale(torch.tensor([4.0], dtype=torch.float32, device=device))
self.assertTrue(isinstance(a._scale, torch.cuda.FloatTensor))
# The following three lines should work whether or not cuda is available.
serialized = pickle.dumps(a)
b = pickle.loads(serialized)
self.assertEqual(b.is_enabled(), a.is_enabled())
if a.is_enabled():
self.assertEqual(b.get_scale(), 3.)
self.assertEqual(b.get_growth_factor(), 4.)
self.assertEqual(b.get_backoff_factor(), .5)
self.assertEqual(b.get_growth_interval(), 2)
self.assertEqual(b._init_growth_tracker, 0)
# supplies a dummy key to test the defaultdict's default_factory
self.assertEqual(b._per_optimizer_states["fdsa"],
torch.cuda.amp.grad_scaler._refresh_per_optimizer_state())
if lazy_init_scale:
self.assertEqual(b.scale(torch.tensor([4.0], dtype=torch.float32, device=device)), 12.0)
# FIXME: move to test distributions
|
def test_pickle_gradscaler(self, device):
# This test should pass in 3 cases for cuda:
# 1. cuda is not available.
# 2. cuda is available but device is not cuda.
# 3. cuda is available and device is cuda.
# In case 1, a and b disable themselves on construction and shouldn't try to pickle workhorse attributes.
# In case 2, a and b are enabled. Workhorse attributes participate in pickling, but none are lazy-inited
# to cuda Tensors, because I don't want to do cuda things if device is not cuda.
# In case 3, a and b are enabled and we may also try lazy-initing _scale to a cuda tensor.
device = torch.device(device)
try_lazy_inits = (True, False)
GradScaler = partial(torch.GradScaler, device=device.type)
for lazy_init_scale in try_lazy_inits:
a = GradScaler(init_scale=3., growth_factor=4., backoff_factor=.5, growth_interval=2)
if device.type == "cuda":
self.assertTrue(not a.is_enabled() if torch.cuda.amp.common.amp_definitely_not_available() else a.is_enabled())
else:
self.assertTrue(a.is_enabled())
if lazy_init_scale:
# Dummy a.scale() call lazy-inits a._scale Tensor.
a.scale(torch.tensor([4.0], dtype=torch.float32, device=device))
self.assertTrue(a._scale.device.type == device.type)
# The following three lines should work whether or not cuda is available.
serialized = pickle.dumps(a)
b = pickle.loads(serialized)
self.assertEqual(b.is_enabled(), a.is_enabled())
if a.is_enabled():
self.assertEqual(b.get_scale(), 3.)
self.assertEqual(b.get_growth_factor(), 4.)
self.assertEqual(b.get_backoff_factor(), .5)
self.assertEqual(b.get_growth_interval(), 2)
self.assertEqual(b._init_growth_tracker, 0)
# supplies a dummy key to test the defaultdict's default_factory
self.assertEqual(b._per_optimizer_states["fdsa"],
torch.amp.grad_scaler._refresh_per_optimizer_state())
if lazy_init_scale:
self.assertEqual(b.scale(torch.tensor([4.0], dtype=torch.float32, device=device)), 12.0)
# FIXME: move to test distributions
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
from torch import inf, nan
from itertools import product, combinations, permutations
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_utils import (
TEST_WITH_TORCHINDUCTOR, TestCase, TEST_WITH_ROCM, run_tests,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, load_tests, skipIfTorchInductor, slowTest,
TEST_WITH_CROSSREF, skipIfTorchDynamo,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
skipIfNotRegistered, bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta,
PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN)
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types,
)
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
class TestTorchDeviceType(TestCase):
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
import weakref
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
class TestTorchDeviceType(TestCase):
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
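A small round-trip sketch of the device-generic torch.GradScaler pickling behaviour shown in the "after" version of test_pickle_gradscaler above; it assumes the scaler is enabled on the chosen device, so the checks are guarded the same way the test guards them.

import pickle
import torch

scaler = torch.GradScaler(device="cpu", init_scale=3., growth_factor=4.,
                          backoff_factor=.5, growth_interval=2)
restored = pickle.loads(pickle.dumps(scaler))
assert restored.is_enabled() == scaler.is_enabled()
if restored.is_enabled():
    # Constructor arguments survive the pickle round trip.
    assert restored.get_scale() == 3.
    assert restored.get_growth_interval() == 2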
torch
|
test/test_torch.py
|
test_index_add_correctness
|
def test_index_add_correctness(self):
    # Check that index_add produces the correct result when alpha is 1
    # and the index dtype is torch.long, i.e., when the scatter_add
    # path is used
def helper(dim, dtype, device, size_result, size_source):
tensor = torch.zeros(size_result, dtype=dtype, device=device)
index = torch.randint(0, size_result[dim], (size_source[dim],),
dtype=torch.long, device=device)
if dtype.is_floating_point or dtype.is_complex:
source = torch.rand(size_source, dtype=dtype, device=device)
elif dtype.is_signed:
source = torch.randint(-2, 5, size_source, dtype=dtype, device=device)
else:
source = torch.randint(0, 5, size_source, dtype=dtype, device=device)
ref_out = tensor.index_add(dim, index, source, alpha=2.) / 2.
ref_out = ref_out.to(dtype=dtype)
out = tensor.index_add(dim, index, source)
if device == 'cuda':
self.assertEqual(out, ref_out, atol=1e-2, rtol=1e-2)
else:
self.assertEqual(out, ref_out.to(dtype=dtype))
for dim in [-1, -2, -3]:
for dtype in all_types_and_complex_and(torch.half, torch.bfloat16):
for device in get_all_device_types():
for size in [(2, 512, 256), (5, 256, 256)]:
helper(dim, dtype, device, size, size)
# Check broadcast cases on CPU
size_result = (2, 512, 256)
size_source = (1, 512, 256)
helper(dim, dtype, 'cpu', size_result, size_source)
size_result = (2, 512, 512)
size_source = (1, 512, 1)
helper(dim, dtype, 'cpu', size_result, size_source)
size_result = (2, 512, 256)
size_source = (2, 1, 256)
helper(dim, dtype, 'cpu', size_result, size_source)
# Check bound
result = torch.zeros(1, 512, 256, dtype=dtype)
source = torch.ones(1, 512, 256, dtype=dtype)
index = torch.ones(257).to(dtype=torch.long)
self.assertRaises(RuntimeError, lambda: result.index_add_(dim, index, source))
index = (torch.ones(256) * 257).to(dtype=torch.long)
self.assertRaises(RuntimeError, lambda: result.index_add_(dim, index, source))
# FIXME: move to shape ops test suite
|
def test_index_add_correctness(self):
    # Check that index_add produces the correct result when alpha is 1
    # and the index dtype is torch.long, i.e., when the scatter_add
    # path is used
def helper(dim, dtype, device, size_result, size_source):
tensor = torch.zeros(size_result, dtype=dtype, device=device)
index = torch.randint(0, size_result[dim], (size_source[dim],),
dtype=torch.long, device=device)
if dtype.is_floating_point or dtype.is_complex:
source = torch.rand(size_source, dtype=dtype, device=device)
elif dtype.is_signed:
source = torch.randint(-2, 5, size_source, dtype=dtype, device=device)
else:
source = torch.randint(0, 5, size_source, dtype=dtype, device=device)
ref_out = tensor.index_add(dim, index, source, alpha=2.) / 2.
ref_out = ref_out.to(dtype=dtype)
out = tensor.index_add(dim, index, source)
if device == 'cuda':
self.assertEqual(out, ref_out, atol=1e-2, rtol=1e-2)
else:
# scatter_add uses fp32 as accumulate type, while index_add doesn't.
self.assertEqual(out, ref_out.to(dtype=dtype), atol=1e-2, rtol=1e-2)
for dim in [-1, -2, -3]:
for dtype in all_types_and_complex_and(torch.half, torch.bfloat16):
for device in get_all_device_types():
for size in [(2, 512, 256), (5, 256, 256)]:
helper(dim, dtype, device, size, size)
# Check bound
result = torch.zeros(1, 512, 256, dtype=dtype)
source = torch.ones(1, 512, 256, dtype=dtype)
index = torch.ones(257).to(dtype=torch.long)
self.assertRaises(RuntimeError, lambda: result.index_add_(dim, index, source))
index = (torch.ones(256) * 257).to(dtype=torch.long)
self.assertRaises(RuntimeError, lambda: result.index_add_(dim, index, source))
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
from torch import inf, nan
from itertools import product, combinations, permutations
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_utils import (
TEST_WITH_TORCHINDUCTOR, TestCase, TEST_WITH_ROCM, run_tests,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, load_tests, skipIfTorchInductor, slowTest,
TEST_WITH_CROSSREF, skipIfTorchDynamo,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
skipIfNotRegistered, bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta,
PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN)
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types,
)
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
class TestTorch(TestCase):
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
import weakref
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
class TestTorch(TestCase):
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
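A minimal sketch of the index_add semantics test_index_add_correctness relies on: with the default alpha of 1 and a torch.long index, the result matches the alpha=2 reference divided by two. The shapes below are illustrative only.

import torch

dest = torch.zeros(3, 4)
index = torch.tensor([0, 2], dtype=torch.long)
source = torch.ones(2, 4)
out = dest.index_add(0, index, source)                  # rows 0 and 2 receive +1
ref = dest.index_add(0, index, source, alpha=2.) / 2.   # reference used by the test
torch.testing.assert_close(out, ref)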
torch
|
test/test_torch.py
|
test_index_add_cornercase
|
def test_index_add_cornercase(self):
for device in get_all_device_types():
dest = torch.randn((), device=device)
index = torch.tensor([0], device=device)
source = torch.randn(1, 1, 1, device=device)
with self.assertRaisesRegex(
RuntimeError,
r"source tensor shape must match self tensor shape, excluding the specified dimension",
):
dest.index_add(0, index, source)
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
class TestTorch(TestCase):
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
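A sketch of the corner case test_index_add_cornercase checks above: source must match self in every dimension except the indexed one, so a rank mismatch raises a RuntimeError. The concrete shapes mirror the test.

import torch

dest = torch.randn(())              # 0-dim destination
index = torch.tensor([0])
source = torch.randn(1, 1, 1)       # shape cannot match a 0-dim self
try:
    dest.index_add(0, index, source)
except RuntimeError as err:
    print(err)                      # mentions the source/self shape mismatch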
torch
|
test/test_torch.py
|
test_linspace_logspace
|
# FIXME: move to shape ops test suite
|
def test_linspace_logspace(self):
        # Ensure the output does not require grad, regardless of whether the inputs require grad.
# The output of factory functions should not be part of any computational graph.
start = 0.0
end = 3.0
for step in [0, 1, 2]:
self.assertFalse(
torch.linspace(
torch.tensor(start, requires_grad=True),
torch.tensor(end, requires_grad=True), step
).requires_grad
)
self.assertFalse(torch.linspace(torch.tensor(start, requires_grad=True), end, step).requires_grad)
self.assertFalse(torch.linspace(start, torch.tensor(end, requires_grad=True), step).requires_grad)
self.assertFalse(
torch.logspace(
torch.tensor(start, requires_grad=True),
torch.tensor(end, requires_grad=True), step
).requires_grad
)
self.assertFalse(torch.logspace(torch.tensor(start, requires_grad=True), end, step).requires_grad)
self.assertFalse(torch.logspace(start, torch.tensor(end, requires_grad=True), step).requires_grad)
# FIXME: move to shape ops test suite
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
class TestTorch(TestCase):
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
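A short sketch of the factory-function behaviour test_linspace_logspace asserts: linspace and logspace outputs never require grad, even when the endpoints are tensors that do.

import torch

start = torch.tensor(0.0, requires_grad=True)
end = torch.tensor(3.0, requires_grad=True)
assert not torch.linspace(start, end, 5).requires_grad
assert not torch.logspace(start, end, 5).requires_grad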
torch
|
test/test_torch.py
|
test_equal
|
def test_equal(self):
# Contiguous, 1D
t1 = torch.tensor((3., 4., 9., 10.))
t2 = t1.contiguous()
t3 = torch.tensor((1., 9., 3., 10.))
t4 = torch.tensor((3., 4., 9.))
t5 = torch.tensor([])
self.assertTrue(t1.equal(t2))
self.assertFalse(t1.equal(t3))
self.assertFalse(t1.equal(t4))
self.assertFalse(t1.equal(t5))
self.assertTrue(torch.equal(t1, t2))
self.assertFalse(torch.equal(t1, t3))
self.assertFalse(torch.equal(t1, t4))
self.assertFalse(torch.equal(t1, t5))
# Non contiguous, 2D
s = torch.tensor(((1, 2, 3, 4), (5, 6, 7, 8)))
s1 = s[:, 1:3]
s2 = s1.clone()
s3 = torch.tensor(((2, 3), (6, 7)))
s4 = torch.tensor(((0, 0), (0, 0)))
self.assertFalse(s1.is_contiguous())
self.assertTrue(s1.equal(s2))
self.assertTrue(s1.equal(s3))
self.assertFalse(s1.equal(s4))
self.assertTrue(torch.equal(s1, s2))
self.assertTrue(torch.equal(s1, s3))
self.assertFalse(torch.equal(s1, s4))
# Different dtypes
x = torch.tensor((1, 2, 3), dtype=torch.float)
y = torch.tensor((1, 2, 3), dtype=torch.int)
z = torch.tensor((1, -1), dtype=torch.int)
self.assertTrue(torch.equal(x, y))
self.assertFalse(torch.equal(z, x))
|
def test_equal(self):
for device in ["cpu", "cuda"]:
if device == "cuda" and not torch.cuda.is_available():
continue
# Contiguous, 1D
t1 = torch.tensor((3., 4., 9., 10.), device=device)
t2 = t1.contiguous()
t3 = torch.tensor((1., 9., 3., 10.), device=device)
t4 = torch.tensor((3., 4., 9.), device=device)
t5 = torch.tensor([], device=device)
self.assertTrue(t1.equal(t2))
self.assertFalse(t1.equal(t3))
self.assertFalse(t1.equal(t4))
self.assertFalse(t1.equal(t5))
self.assertTrue(torch.equal(t1, t2))
self.assertFalse(torch.equal(t1, t3))
self.assertFalse(torch.equal(t1, t4))
self.assertFalse(torch.equal(t1, t5))
# Non contiguous, 2D
s = torch.tensor(((1, 2, 3, 4), (5, 6, 7, 8)), device=device)
s1 = s[:, 1:3]
s2 = s1.clone()
s3 = torch.tensor(((2, 3), (6, 7)), device=device)
s4 = torch.tensor(((0, 0), (0, 0)), device=device)
self.assertFalse(s1.is_contiguous())
self.assertTrue(s1.equal(s2))
self.assertTrue(s1.equal(s3))
self.assertFalse(s1.equal(s4))
self.assertTrue(torch.equal(s1, s2))
self.assertTrue(torch.equal(s1, s3))
self.assertFalse(torch.equal(s1, s4))
# Different dtypes
x = torch.tensor((1, 2, 3), dtype=torch.float, device=device)
y = torch.tensor((1, 2, 3), dtype=torch.int, device=device)
z = torch.tensor((1, -1), dtype=torch.int, device=device)
self.assertTrue(torch.equal(x, y))
self.assertFalse(torch.equal(z, x))
# Fast path test: tensor flags, like neg and conj
neg_0 = torch.tensor((1, 2, 3), dtype=torch.float, device=device)
neg_1 = neg_0._neg_view()
self.assertTrue(neg_1.is_neg())
self.assertEqual(neg_0.data_ptr(), neg_1.data_ptr())
self.assertEqual(neg_0.storage_offset(), neg_1.storage_offset())
self.assertEqual(neg_0.stride(), neg_1.stride())
self.assertEqual(neg_0.size(), neg_1.size())
self.assertFalse(torch.equal(neg_0, neg_1))
# FIXME: Disable the following check due to the inductor failure
# See https://github.com/pytorch/pytorch/issues/100340 and
# https://github.com/pytorch/pytorch/issues/98175
if not TEST_WITH_TORCHINDUCTOR:
self.assertTrue(torch.equal(neg_0, neg_1._neg_view()))
conj_0 = torch.tensor([1.0 + 2.0j, 2.0 + 1.0j], device=device)
conj_1 = conj_0.conj()
self.assertTrue(conj_1.is_conj())
self.assertEqual(conj_0.data_ptr(), conj_1.data_ptr())
self.assertEqual(conj_0.storage_offset(), conj_1.storage_offset())
self.assertEqual(conj_0.stride(), conj_1.stride())
self.assertEqual(conj_0.size(), conj_1.size())
self.assertFalse(torch.equal(conj_0, conj_1))
# FIXME: Disable the following check due to the inductor failure
# See https://github.com/pytorch/pytorch/issues/100340 and
# https://github.com/pytorch/pytorch/issues/98175
if not TEST_WITH_TORCHINDUCTOR:
self.assertTrue(torch.equal(conj_0, conj_1.conj()))
# Fast path test: two tensors share the same storage, but different dtype
s_0 = torch.rand((2, 3), dtype=torch.float, device=device)
s_1 = s_0.view(dtype=torch.int32)
self.assertEqual(s_0.data_ptr(), s_1.data_ptr())
self.assertEqual(s_0.storage_offset(), s_1.storage_offset())
self.assertEqual(s_0.stride(), s_1.stride())
self.assertEqual(s_0.size(), s_1.size())
self.assertFalse(torch.equal(s_0, s_1))
# Fast path test: two tensors share the same storage, but different strides
t_0 = torch.rand((2, 3), dtype=torch.float, device=device)
t_1 = t_0.t()
self.assertEqual(t_0.data_ptr(), t_1.data_ptr())
self.assertEqual(t_0.storage_offset(), t_1.storage_offset())
self.assertNotEqual(t_0.stride(), t_1.stride())
self.assertNotEqual(t_0.size(), t_1.size())
self.assertFalse(torch.equal(t_0, t_1))
# Fast path: tensor containing `nan` is not equal to self
for dtype in floating_and_complex_types():
t = torch.tensor([1., float('nan')], dtype=dtype)
self.assertFalse(torch.equal(t, t))
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
from torch import inf, nan
from itertools import product, combinations, permutations
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_utils import (
TEST_WITH_TORCHINDUCTOR, TestCase, TEST_WITH_ROCM, run_tests,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, load_tests, skipIfTorchInductor, slowTest,
TEST_WITH_CROSSREF, skipIfTorchDynamo,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
skipIfNotRegistered, bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta,
PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN)
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types,
)
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
class TestTorch(TestCase):
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
import weakref
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
class TestTorch(TestCase):
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
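A minimal sketch of two torch.equal behaviours exercised in test_equal above: equality compares values rather than dtypes, and a tensor containing NaN is never equal to itself.

import torch

x = torch.tensor((1, 2, 3), dtype=torch.float)
y = torch.tensor((1, 2, 3), dtype=torch.int)
assert torch.equal(x, y)            # same values, different dtypes

t = torch.tensor([1.0, float('nan')])
assert not torch.equal(t, t)        # NaN compares unequal to itself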
torch
|
test/test_torch.py
|
test_element_size
|
def test_element_size(self):
byte = torch.ByteStorage().element_size()
char = torch.CharStorage().element_size()
short = torch.ShortStorage().element_size()
int = torch.IntStorage().element_size()
long = torch.LongStorage().element_size()
float = torch.FloatStorage().element_size()
double = torch.DoubleStorage().element_size()
bool = torch.BoolStorage().element_size()
bfloat16 = torch.BFloat16Storage().element_size()
complexfloat = torch.ComplexFloatStorage().element_size()
complexdouble = torch.ComplexDoubleStorage().element_size()
self.assertEqual(byte, torch.ByteTensor().element_size())
self.assertEqual(char, torch.CharTensor().element_size())
self.assertEqual(short, torch.ShortTensor().element_size())
self.assertEqual(int, torch.IntTensor().element_size())
self.assertEqual(long, torch.LongTensor().element_size())
self.assertEqual(float, torch.FloatTensor().element_size())
self.assertEqual(double, torch.DoubleTensor().element_size())
self.assertEqual(bool, torch.BoolTensor().element_size())
self.assertEqual(bfloat16, torch.tensor([], dtype=torch.bfloat16).element_size())
self.assertEqual(complexfloat, torch.tensor([], dtype=torch.complex64).element_size())
self.assertEqual(complexdouble, torch.tensor([], dtype=torch.complex128).element_size())
self.assertGreater(byte, 0)
self.assertGreater(char, 0)
self.assertGreater(short, 0)
self.assertGreater(int, 0)
self.assertGreater(long, 0)
self.assertGreater(float, 0)
self.assertGreater(double, 0)
self.assertGreater(bool, 0)
self.assertGreater(bfloat16, 0)
self.assertGreater(complexfloat, 0)
self.assertGreater(complexdouble, 0)
# These tests are portable, not necessarily strict for your system.
self.assertEqual(byte, 1)
self.assertEqual(char, 1)
self.assertEqual(bool, 1)
self.assertGreaterEqual(short, 2)
self.assertGreaterEqual(int, 2)
self.assertGreaterEqual(int, short)
self.assertGreaterEqual(long, 4)
self.assertGreaterEqual(long, int)
self.assertGreaterEqual(double, float)
|
def test_element_size(self):
byte = torch.ByteStorage().element_size()
char = torch.CharStorage().element_size()
short = torch.ShortStorage().element_size()
int = torch.IntStorage().element_size()
long = torch.LongStorage().element_size()
float = torch.FloatStorage().element_size()
double = torch.DoubleStorage().element_size()
bool = torch.BoolStorage().element_size()
bfloat16 = torch.BFloat16Storage().element_size()
complexfloat = torch.ComplexFloatStorage().element_size()
complexdouble = torch.ComplexDoubleStorage().element_size()
self.assertEqual(byte, torch.ByteTensor().element_size())
self.assertEqual(byte, torch.ByteTensor().itemsize)
self.assertEqual(char, torch.CharTensor().element_size())
self.assertEqual(char, torch.CharTensor().itemsize)
self.assertEqual(short, torch.ShortTensor().element_size())
self.assertEqual(short, torch.ShortTensor().itemsize)
self.assertEqual(int, torch.IntTensor().element_size())
self.assertEqual(int, torch.IntTensor().itemsize)
self.assertEqual(long, torch.LongTensor().element_size())
self.assertEqual(long, torch.LongTensor().itemsize)
self.assertEqual(float, torch.FloatTensor().element_size())
self.assertEqual(float, torch.FloatTensor().itemsize)
self.assertEqual(double, torch.DoubleTensor().element_size())
self.assertEqual(double, torch.DoubleTensor().itemsize)
self.assertEqual(bool, torch.BoolTensor().element_size())
self.assertEqual(bool, torch.BoolTensor().itemsize)
self.assertEqual(bfloat16, torch.tensor([], dtype=torch.bfloat16).element_size())
self.assertEqual(bfloat16, torch.tensor([], dtype=torch.bfloat16).itemsize)
self.assertEqual(complexfloat, torch.tensor([], dtype=torch.complex64).element_size())
self.assertEqual(complexfloat, torch.tensor([], dtype=torch.complex64).itemsize)
self.assertEqual(complexdouble, torch.tensor([], dtype=torch.complex128).element_size())
self.assertEqual(complexdouble, torch.tensor([], dtype=torch.complex128).itemsize)
self.assertGreater(byte, 0)
self.assertGreater(char, 0)
self.assertGreater(short, 0)
self.assertGreater(int, 0)
self.assertGreater(long, 0)
self.assertGreater(float, 0)
self.assertGreater(double, 0)
self.assertGreater(bool, 0)
self.assertGreater(bfloat16, 0)
self.assertGreater(complexfloat, 0)
self.assertGreater(complexdouble, 0)
# These tests are portable, not necessarily strict for your system.
self.assertEqual(byte, 1)
self.assertEqual(char, 1)
self.assertEqual(bool, 1)
self.assertGreaterEqual(short, 2)
self.assertGreaterEqual(int, 2)
self.assertGreaterEqual(int, short)
self.assertGreaterEqual(long, 4)
self.assertGreaterEqual(long, int)
self.assertGreaterEqual(double, float)
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
from torch import inf, nan
from itertools import product, combinations, permutations
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_utils import (
TEST_WITH_TORCHINDUCTOR, TestCase, TEST_WITH_ROCM, run_tests,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, load_tests, skipIfTorchInductor, slowTest,
TEST_WITH_CROSSREF, skipIfTorchDynamo,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
skipIfNotRegistered, bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta,
PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN)
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types,
)
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
class TestTorch(TestCase):
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
import weakref
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
class TestTorch(TestCase):
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
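A small sketch of the element_size()/itemsize pairing the updated test_element_size checks: Tensor.itemsize is the NumPy-style alias for element_size(). The dtypes below are illustrative.

import torch

t = torch.tensor([], dtype=torch.complex64)
assert t.element_size() == t.itemsize == 8   # complex64 uses 8 bytes per element
assert torch.tensor([], dtype=torch.bool).itemsize == 1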
torch
|
test/test_torch.py
|
test_storage_byteswap
|
# Test that internal versions of functions related to TypedStorage do not
# produce a deprecation warning
|
def test_storage_byteswap(self):
input = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
swapped_8bytes = [7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8]
swapped_4bytes = [3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12]
swapped_2bytes = [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14]
swapped_1byte = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
storage = torch.storage.TypedStorage(input, dtype=torch.uint8)._untyped_storage
storage_f64 = storage.__copy__()
storage_f64.byteswap(torch.float64)
self.assertEqual(storage_f64.tolist(), swapped_8bytes)
storage_f32 = storage.__copy__()
storage_f32.byteswap(torch.float32)
self.assertEqual(storage_f32.tolist(), swapped_4bytes)
storage_f16 = storage.__copy__()
storage_f16.byteswap(torch.float16)
self.assertEqual(storage_f16.tolist(), swapped_2bytes)
storage_bf16 = storage.__copy__()
storage_bf16.byteswap(torch.bfloat16)
self.assertEqual(storage_bf16.tolist(), swapped_2bytes)
storage_i64 = storage.__copy__()
storage_i64.byteswap(torch.int64)
self.assertEqual(storage_i64.tolist(), swapped_8bytes)
storage_i32 = storage.__copy__()
storage_i32.byteswap(torch.int32)
self.assertEqual(storage_i32.tolist(), swapped_4bytes)
storage_i16 = storage.__copy__()
storage_i16.byteswap(torch.int16)
self.assertEqual(storage_i16.tolist(), swapped_2bytes)
storage_i8 = storage.__copy__()
storage_i8.byteswap(torch.int8)
self.assertEqual(storage_i8.tolist(), swapped_1byte)
storage_ui8 = storage.__copy__()
storage_ui8.byteswap(torch.uint8)
self.assertEqual(storage_ui8.tolist(), swapped_1byte)
storage_bool = storage.__copy__()
storage_bool.byteswap(torch.bool)
self.assertEqual(storage_bool.tolist(), swapped_1byte)
storage_c128 = storage.__copy__()
storage_c128.byteswap(torch.complex128)
self.assertEqual(storage_c128.tolist(), swapped_8bytes)
storage_c64 = storage.__copy__()
storage_c64.byteswap(torch.complex64)
self.assertEqual(storage_c64.tolist(), swapped_4bytes)
# Test that internal versions of functions related to TypedStorage do not
# produce a deprecation warning
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
class TestTorch(TestCase):
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/test_torch.py
|
test_typed_storage_deprecation_warning
|
def test_typed_storage_deprecation_warning(self):
s0 = torch.FloatStorage(10)
funcs = [
lambda: torch.FloatStorage(),
lambda: torch.FloatStorage.dtype,
lambda: s0.fill_(0),
lambda: s0.is_cuda,
lambda: s0.untyped(),
lambda: len(s0),
lambda: s0[0],
]
if torch.cuda.is_available():
s1 = torch.cuda.FloatStorage(10)
funcs += [
lambda: torch.cuda.FloatStorage(),
lambda: torch.cuda.FloatStorage.dtype,
lambda: s1.fill_(0),
lambda: s1.is_cuda,
lambda: s1.untyped(),
lambda: len(s1),
lambda: s1[0],
]
# Check that each of the TypedStorage function calls produces a warning
# if warnings are reset between each call
for f in funcs:
with warnings.catch_warnings(record=True) as w:
warnings.resetwarnings()
f()
self.assertEqual(len(w), 1, msg=str([str(a) for a in w]))
warning = w[0].message
self.assertTrue(warning, DeprecationWarning)
self.assertTrue(re.search(
'^TypedStorage is deprecated',
str(warning)))
|
def test_typed_storage_deprecation_warning(self):
s0 = torch.FloatStorage(10)
funcs = [
lambda: torch.FloatStorage(),
lambda: torch.FloatStorage.dtype,
lambda: s0.fill_(0),
lambda: s0.is_cuda,
lambda: s0.untyped(),
lambda: len(s0),
lambda: s0[0],
]
if torch.cuda.is_available():
s1 = torch.cuda.FloatStorage(10)
funcs += [
lambda: torch.cuda.FloatStorage(),
lambda: torch.cuda.FloatStorage.dtype,
lambda: s1.fill_(0),
lambda: s1.is_cuda,
lambda: s1.untyped(),
lambda: len(s1),
lambda: s1[0],
]
# Check that each of the TypedStorage function calls produces a warning
# if warnings are reset between each call
for f in funcs:
with AlwaysWarnTypedStorageRemoval(True):
with warnings.catch_warnings(record=True) as w:
warnings.resetwarnings()
f()
self.assertEqual(len(w), 1, msg=str([str(a) for a in w]))
warning = w[0].message
self.assertTrue(warning, DeprecationWarning)
self.assertTrue(re.search(
'^TypedStorage is deprecated',
str(warning)))
# Test that only the first warning is raised by default
torch.storage._reset_warn_typed_storage_removal()
with warnings.catch_warnings(record=True) as w:
warnings.resetwarnings()
torch.FloatStorage()
torch.randn(10).storage()
self.assertEqual(len(w), 1, msg=str([str(a) for a in w]))
warning = w[0].message
self.assertTrue(re.search(
'^TypedStorage is deprecated',
str(warning)))
# Check the line of code from the warning's stack
with open(w[0].filename, encoding="utf-8") as f:
code_line = f.readlines()[w[0].lineno - 1]
self.assertTrue(re.search(re.escape('torch.FloatStorage()'), code_line))
# Check that no new warning is emitted if one was already emitted earlier in the process
with warnings.catch_warnings(record=True) as w:
warnings.resetwarnings()
torch.FloatStorage()
torch.randn(10).storage()
self.assertEqual(len(w), 0, msg=str([str(a) for a in w]))
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
from torch import inf, nan
from itertools import product, combinations, permutations
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_utils import (
TEST_WITH_TORCHINDUCTOR, TestCase, TEST_WITH_ROCM, run_tests,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, load_tests, skipIfTorchInductor, slowTest,
TEST_WITH_CROSSREF, skipIfTorchDynamo,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
skipIfNotRegistered, bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta,
PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN)
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types,
)
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
class TestTorch(TestCase):
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
import weakref
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
class TestTorch(TestCase):
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_torch.py
|
test_pin_memory
|
def test_pin_memory(self):
x = torch.randn(3, 5)
self.assertFalse(x.is_pinned())
if not torch.cuda.is_available():
self.assertRaises(RuntimeError, lambda: x.pin_memory())
else:
pinned = x.pin_memory()
self.assertTrue(pinned.is_pinned())
self.assertEqual(pinned, x)
self.assertNotEqual(pinned.data_ptr(), x.data_ptr())
# test that pin_memory on an already pinned tensor has no effect
self.assertIs(pinned, pinned.pin_memory())
self.assertEqual(pinned.data_ptr(), pinned.pin_memory().data_ptr())
|
def test_pin_memory(self):
x = torch.randn(3, 5)
self.assertFalse(x.is_pinned())
if torch.cuda.is_available():
pinned = x.pin_memory()
self.assertTrue(pinned.is_pinned())
self.assertEqual(pinned, x)
self.assertNotEqual(pinned.data_ptr(), x.data_ptr())
# test that pin_memory on an already pinned tensor has no effect
self.assertIs(pinned, pinned.pin_memory())
self.assertEqual(pinned.data_ptr(), pinned.pin_memory().data_ptr())
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
from torch import inf, nan
from itertools import product, combinations, permutations
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_utils import (
TEST_WITH_TORCHINDUCTOR, TestCase, TEST_WITH_ROCM, run_tests,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, load_tests, skipIfTorchInductor, slowTest,
TEST_WITH_CROSSREF, skipIfTorchDynamo,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
skipIfNotRegistered, bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta,
PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN)
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types,
)
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
class TestTorch(TestCase):
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
import weakref
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
class TestTorch(TestCase):
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_torch.py
|
test_dim_order
|
def test_dim_order(self):
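# dim_order() reports dimensions from outermost to innermost stride, so it should
# match the tensor's memory format (contiguous, channels_last, or an explicit permutation).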
shape = (2, 3, 5, 7)
t = torch.empty(shape)
self.assertSequenceEqual(t.dim_order(), (0, 1, 2, 3), seq_type=tuple)
# transpose doesn't really change the underlying physical memory,
# so we expect dim_order to change to reflect that (like strides do)
self.assertSequenceEqual(t.transpose(0, 1).dim_order(), (1, 0, 2, 3))
t = torch.empty(shape, memory_format=torch.channels_last)
self.assertSequenceEqual(t.dim_order(), (0, 2, 3, 1))
t = torch.empty((2, 3, 5, 7, 8), memory_format=torch.channels_last_3d)
self.assertSequenceEqual(t.dim_order(), (0, 2, 3, 4, 1))
for dim_order in itertools.permutations(range(4)):
self.assertSequenceEqual(
dim_order, torch.empty_permuted(shape, dim_order).dim_order()
)
for shape in [(2, 2, 2, 2), (2, 1, 2, 2), (2, 2, 1, 2), (2, 2, 2, 1), (2, 2, 1, 1), (2, 1, 1, 2)]:
for memory_format in (torch.contiguous_format, torch.channels_last):
t = torch.empty(shape).to(memory_format=memory_format)
if memory_format == torch.contiguous_format:
dim_order_target = list(range(len(shape)))
elif memory_format == torch.channels_last:
dim_order_target = [0, *list(range(2, len(shape))), 1]
self.assertSequenceEqual(dim_order_target, t.dim_order())
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
class TestTorch(TestCase):
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_torch.py
|
test_nbytes
|
def test_nbytes(self):
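# nbytes should always equal numel() * element_size(), including for 0-dim and zero-element tensors.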
a = torch.randn(1, 2, 3, dtype=torch.float64)
self.assertEqual(a.numel() * a.element_size(), a.nbytes)
b = torch.randn(())
self.assertEqual(b.numel() * b.element_size(), b.nbytes)
c = torch.randn(1, 0)
self.assertEqual(c.numel() * c.element_size(), c.nbytes)
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
class TestTorch(TestCase):
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_torch.py
|
test_upsample_nearest2d_meta
|
def test_upsample_nearest2d_meta(self):
# TODO: the out tests cannot be triggered by test_nn.py because
# we don't actually do out= arguments for nn functions, so there
# is no public API by which to get the out version
# Make sure we don't clobber strides of out tensor. NB: this
# test must be done on 2d/3d, because 1d doesn't have any meaningful
# layout support
x = torch.empty(4, 3, 8, 8, device='meta')
out = torch.empty(4, 3, 16, 16, device='meta', memory_format=torch.channels_last)
torch._C._nn.upsample_nearest2d(x, (16, 16), out=out)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
x = torch.empty(4, 3, 8, 8, device='meta', memory_format=torch.channels_last)
out = torch.empty(4, 3, 16, 16, device='meta')
torch._C._nn.upsample_nearest2d(x, (16, 16), out=out)
self.assertTrue(out.is_contiguous())
# But if resize occurs, do clobber
x = torch.empty(4, 3, 8, 8, device='meta', memory_format=torch.channels_last)
out = torch.empty(0, device='meta')
torch._C._nn.upsample_nearest2d(x, (16, 16), out=out)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
# Complain if out dtype mismatch
x = torch.empty(4, 3, 8, 8, device='meta', dtype=torch.float)
out = torch.empty(4, 3, 16, 16, device='meta', dtype=torch.double)
self.assertExpectedRaisesInline(
RuntimeError, lambda: torch._C._nn.upsample_nearest2d(x, (16, 16), out=out),
"""Expected out tensor to have dtype float, but got double instead"""
)
# Complain if out device mismatch
x = torch.empty(0, 3, 8, 8, device='meta')
out = torch.empty(0, 3, 16, 16, device='cpu')
# FIXME: compiling should properly error with a device mismatch.
if not TEST_WITH_TORCHINDUCTOR:
self.assertExpectedRaisesInline(
RuntimeError, lambda: torch._C._nn.upsample_nearest2d(x, (16, 16), out=out),
"""Expected out tensor to have device meta, but got cpu instead"""
)
|
def test_upsample_nearest2d_meta(self):
# TODO: the out tests cannot be triggered by test_nn.py because
# we don't actually do out= arguments for nn functions, so there
# is no public API by which to get the out version
# Make sure we don't clobber strides of out tensor. NB: this
# test must be done on 2d/3d, because 1d doesn't have any meaningful
# layout support
x = torch.empty(4, 3, 8, 8, device='meta')
out = torch.empty(4, 3, 16, 16, device='meta', memory_format=torch.channels_last)
torch._C._nn.upsample_nearest2d(x, (16, 16), out=out)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
x = torch.empty(4, 3, 8, 8, device='meta', memory_format=torch.channels_last)
out = torch.empty(4, 3, 16, 16, device='meta')
torch._C._nn.upsample_nearest2d(x, (16, 16), out=out)
self.assertTrue(out.is_contiguous())
# But if resize occurs, do clobber
x = torch.empty(4, 3, 8, 8, device='meta', memory_format=torch.channels_last)
out = torch.empty(0, device='meta')
torch._C._nn.upsample_nearest2d(x, (16, 16), out=out)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
# Complain if out dtype mismatch
x = torch.empty(4, 3, 8, 8, device='meta', dtype=torch.float)
out = torch.empty(4, 3, 16, 16, device='meta', dtype=torch.double)
self.assertExpectedRaisesInline(
RuntimeError, lambda: torch._C._nn.upsample_nearest2d(x, (16, 16), out=out),
"""Expected out tensor to have dtype torch.float32 but got torch.float64 instead"""
)
# Complain if out device mismatch
x = torch.empty(0, 3, 8, 8, device='meta')
out = torch.empty(0, 3, 16, 16, device='cpu')
# FIXME: compiling should properly error with a device mismatch.
if not TEST_WITH_TORCHINDUCTOR:
self.assertExpectedRaisesInline(
RuntimeError, lambda: torch._C._nn.upsample_nearest2d(x, (16, 16), out=out),
"""Attempting to copy from device meta to device cpu, but cross-device copies are not allowed!"""
)
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
from torch import inf, nan
from itertools import product, combinations, permutations
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_utils import (
TEST_WITH_TORCHINDUCTOR, TestCase, TEST_WITH_ROCM, run_tests,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, load_tests, skipIfTorchInductor, slowTest,
TEST_WITH_CROSSREF, skipIfTorchDynamo,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
skipIfNotRegistered, bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta,
PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN)
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types,
)
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
class TestTorch(TestCase):
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
import weakref
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
class TestTorch(TestCase):
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_torch.py
|
test_manual_seed
|
def test_manual_seed(self):
rng_state = torch.get_rng_state()
torch.manual_seed(2)
x = torch.randn(100)
self.assertEqual(torch.initial_seed(), 2)
torch.manual_seed(2)
y = torch.randn(100)
self.assertEqual(x, y)
max_int64 = 0x7fff_ffff_ffff_ffff
min_int64 = -max_int64 - 1
max_uint64 = 0xffff_ffff_ffff_ffff
# Check all boundary cases of valid seed value inputs
test_cases = [
# (seed, expected_initial_seed)
# Positive seeds should be unchanged
(max_int64, max_int64),
(max_int64 + 1, max_int64 + 1),
(max_uint64, max_uint64),
(0, 0),
# Negative seeds wrap around starting from the largest seed value
(-1, max_uint64),
(min_int64, max_int64 + 1)
]
for seed, expected_initial_seed in test_cases:
torch.manual_seed(seed)
actual_initial_seed = torch.initial_seed()
msg = "expected initial_seed() = %x after calling manual_seed(%x), but got %x instead" % (
expected_initial_seed, seed, actual_initial_seed)
self.assertEqual(expected_initial_seed, actual_initial_seed, msg=msg)
for invalid_seed in [min_int64 - 1, max_uint64 + 1]:
with self.assertRaisesRegex(RuntimeError, r'Overflow when unpacking long'):
torch.manual_seed(invalid_seed)
torch.set_rng_state(rng_state)
# FIXME: Describe this test and port to the generic device framework in a more
# appropriate test suite for the copy operation
|
def test_manual_seed(self):
rng_state = torch.get_rng_state()
torch.manual_seed(2)
x = torch.randn(100)
self.assertEqual(torch.initial_seed(), 2)
torch.manual_seed(2)
y = torch.randn(100)
self.assertEqual(x, y)
max_int64 = 0x7fff_ffff_ffff_ffff
min_int64 = -max_int64 - 1
max_uint64 = 0xffff_ffff_ffff_ffff
# Check all boundary cases of valid seed value inputs
test_cases = [
# (seed, expected_initial_seed)
# Positive seeds should be unchanged
(max_int64, max_int64),
(max_int64 + 1, max_int64 + 1),
(max_uint64, max_uint64),
(0, 0),
# Negative seeds wrap around starting from the largest seed value
(-1, max_uint64),
(min_int64, max_int64 + 1)
]
for seed, expected_initial_seed in test_cases:
torch.manual_seed(seed)
actual_initial_seed = torch.initial_seed()
msg = (f"expected initial_seed() = {expected_initial_seed:x} "
f"after calling manual_seed({seed:x}), but got {actual_initial_seed:x} instead")
self.assertEqual(expected_initial_seed, actual_initial_seed, msg=msg)
for invalid_seed in [min_int64 - 1, max_uint64 + 1]:
with self.assertRaisesRegex(RuntimeError, r'Overflow when unpacking long'):
torch.manual_seed(invalid_seed)
torch.set_rng_state(rng_state)
# FIXME: Describe this test and port to the generic device framework in a more
# appropriate test suite for the copy operation
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
from torch import inf, nan
from itertools import product, combinations, permutations
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_utils import (
TEST_WITH_TORCHINDUCTOR, TestCase, TEST_WITH_ROCM, run_tests,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, load_tests, skipIfTorchInductor, slowTest,
TEST_WITH_CROSSREF, skipIfTorchDynamo,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
skipIfNotRegistered, bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta,
PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN)
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types,
)
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
class TestTorch(TestCase):
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
import weakref
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
class TestTorch(TestCase):
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_torch.py
|
test_copy_behavior
|
def test_copy_behavior(t, non_blocking=False):
self.assertIs(t, t.to(t, non_blocking=non_blocking))
self.assertIs(t, t.to(t.dtype, non_blocking=non_blocking))
self.assertIs(t, t.to(torch.empty_like(t), non_blocking=non_blocking))
self.assertIsNot(t, t.to(t, non_blocking=non_blocking, copy=True))
self.assertIsNot(t, t.to(t.dtype, non_blocking=non_blocking, copy=True))
self.assertIsNot(t, t.to(torch.empty_like(t), non_blocking=non_blocking, copy=True))
devices = [t.device]
if t.device.type == 'cuda':
if t.device.index == -1:
devices.append('cuda:{}'.format(torch.cuda.current_device()))
elif t.device.index == torch.cuda.current_device():
devices.append('cuda')
for device in devices:
self.assertIs(t, t.to(device, non_blocking=non_blocking))
self.assertIs(t, t.to(device, t.dtype, non_blocking=non_blocking))
self.assertIsNot(t, t.to(device, non_blocking=non_blocking, copy=True))
self.assertIsNot(t, t.to(device, t.dtype, non_blocking=non_blocking, copy=True))
a = torch.tensor(5)
if layout == torch.sparse_csr:
a = torch.tensor([[0, 1, 2], [2, 0, 3]]).to_sparse_csr()
test_copy_behavior(a)
self.assertEqual(a.device, a.to('cpu').device)
self.assertEqual(a.device, a.to('cpu', dtype=torch.float32).device)
self.assertIs(torch.float32, a.to('cpu', dtype=torch.float32).dtype)
self.assertEqual(a.device, a.to(torch.float32).device)
self.assertIs(torch.float32, a.to(dtype=torch.float32).dtype)
|
def test_copy_behavior(t, non_blocking=False):
self.assertIs(t, t.to(t, non_blocking=non_blocking))
self.assertIs(t, t.to(t.dtype, non_blocking=non_blocking))
self.assertIs(t, t.to(torch.empty_like(t), non_blocking=non_blocking))
self.assertIsNot(t, t.to(t, non_blocking=non_blocking, copy=True))
self.assertIsNot(t, t.to(t.dtype, non_blocking=non_blocking, copy=True))
self.assertIsNot(t, t.to(torch.empty_like(t), non_blocking=non_blocking, copy=True))
devices = [t.device]
if t.device.type == 'cuda':
if t.device.index == -1:
devices.append(f'cuda:{torch.cuda.current_device()}')
elif t.device.index == torch.cuda.current_device():
devices.append('cuda')
for device in devices:
self.assertIs(t, t.to(device, non_blocking=non_blocking))
self.assertIs(t, t.to(device, t.dtype, non_blocking=non_blocking))
self.assertIsNot(t, t.to(device, non_blocking=non_blocking, copy=True))
self.assertIsNot(t, t.to(device, t.dtype, non_blocking=non_blocking, copy=True))
a = torch.tensor(5)
if layout == torch.sparse_csr:
a = torch.tensor([[0, 1, 2], [2, 0, 3]]).to_sparse_csr()
test_copy_behavior(a)
self.assertEqual(a.device, a.to('cpu').device)
self.assertEqual(a.device, a.to('cpu', dtype=torch.float32).device)
self.assertIs(torch.float32, a.to('cpu', dtype=torch.float32).dtype)
self.assertEqual(a.device, a.to(torch.float32).device)
self.assertIs(torch.float32, a.to(dtype=torch.float32).dtype)
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
from torch import inf, nan
from itertools import product, combinations, permutations
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_utils import (
TEST_WITH_TORCHINDUCTOR, TestCase, TEST_WITH_ROCM, run_tests,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, load_tests, skipIfTorchInductor, slowTest,
TEST_WITH_CROSSREF, skipIfTorchDynamo,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
skipIfNotRegistered, bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta,
PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN)
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types,
)
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
import weakref
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_torch.py
|
test_split_with_sizes_copy_out
|
def test_split_with_sizes_copy_out(self):
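# split_with_sizes_copy with out= should fill the preallocated output tensors so they match
# the corresponding split views, and should also work when captured in a CUDA graph if CUDA is available.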
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
shape = (30, 40, 50)
x = torch.rand(*shape, device=device)
cases = [
(0, [3, 7, 8, 12]),
(1, [3, 7, 10, 20]),
(-2, [3, 7, 10, 20]),
(2, [3, 7, 10, 12, 18]),
(-1, [3, 7, 10, 12, 18]),
(2, [3, 7, 10, 0, 30]),
]
for dim, split_sizes in cases:
views = x.split_with_sizes(split_sizes, dim=dim)
expects = [v.clone() for v in views]
out = [torch.zeros_like(v) for v in views]
for expect, t in zip(expects, out):
if expect.numel() != 0:
self.assertFalse(expect.eq(t).all().item())
torch.split_with_sizes_copy(x, split_sizes, dim=dim, out=out)
for expect, t in zip(expects, out):
self.assertTrue(expect.eq(t).all().item())
if not torch.cuda.is_available():
continue
# Test with cuda graph
out = [torch.zeros_like(v) for v in views]
for expect, t in zip(expects, out):
if expect.numel() != 0:
self.assertFalse(expect.eq(t).all().item())
g = torch.cuda.CUDAGraph()
with torch.cuda.graph(g):
torch.split_with_sizes_copy(x, split_sizes, dim=dim, out=out)
g.replay()
for expect, t in zip(expects, out):
self.assertTrue(expect.eq(t).all().item())
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
class TestTorch(TestCase):
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_torch.py
|
test_storage_dealloc_subclass_resurrected
|
def test_storage_dealloc_subclass_resurrected(self):
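# An UntypedStorage subclass must stay alive (not finalized) while a tensor still references it,
# keep its subclass type when retrieved via untyped_storage(), and only be finalized once the
# last Python reference to it is dropped.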
class MyStorage(torch.UntypedStorage):
finalized_count = 0
def __del__(self):
MyStorage.finalized_count += 1
m, t = Tracker.make()
s = MyStorage(10)
s._tracker = t
del t
a = torch.tensor(s)
self.assertFalse(m[0])
del s
self.assertEqual(MyStorage.finalized_count, 0)
self.assertFalse(m[0])
s = a.untyped_storage()
del a
self.assertFalse(m[0])
self.assertEqual(MyStorage.finalized_count, 0)
self.assertTrue(isinstance(s, MyStorage))
del s
self.assertEqual(MyStorage.finalized_count, 1)
self.assertTrue(m[0])
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
class TestTorch(TestCase):
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_torch.py
|
test_doc
|
def test_doc(self):
checked_types = (types.MethodType, types.FunctionType,
types.BuiltinFunctionType, types.BuiltinMethodType)
def _test_namespace(ns, *skips):
if isinstance(ns, object):
ns_name = ns.__class__.__name__
else:
ns_name = ns.__name__
skip_regexes = []
for r in skips:
if isinstance(r, str):
skip_regexes.append(re.compile('^{}$'.format(re.escape(r))))
else:
skip_regexes.append(r)
for name in dir(ns):
if name.startswith('_'):
continue
if name in ['real', 'imag']:
y = torch.randn(1, dtype=torch.cfloat)
var = getattr(y, name)
elif name in ["H", "mT", "mH"]:
y = torch.randn(1, 1)
var = getattr(y, name)
else:
var = getattr(ns, name)
if not isinstance(var, checked_types):
continue
doc = var.__doc__
has_doc = doc is not None and len(doc.strip()) > 0
full_name = ns_name + '.' + name
if any(r.match(name) for r in skip_regexes):
self.assertFalse(has_doc,
'New docs have been added for {}, please remove '
'it from the skipped list in TestTorch.test_doc'.format(full_name))
else:
self.assertTrue(has_doc, '{} is missing documentation'.format(full_name))
# FIXME: All of the following should be marked as expected failures
# so that it is easier to tell when missing has been added.
# FIXME: fix all the skipped ones below!
test_namespace(torch.randn(1),
'as_strided_',
re.compile('^clamp_(min|max)_?$'),
'is_distributed',
'is_nonzero',
'is_same_size',
'log_softmax',
'map2_',
'new',
'reinforce',
'relu',
'relu_',
'prelu',
'resize',
'resize_as',
'softmax',
'split_with_sizes',
'unsafe_split_with_sizes',
'_autocast_to_fp16',
'_autocast_to_fp32',
)
test_namespace(torch.nn)
test_namespace(torch.nn.functional, 'assert_int_or_pair')
# TODO: add torch.* tests when we have proper namespacing on ATen functions
# test_namespace(torch)
# FIXME: deprecate torch.Tensor constructor
|
def test_doc(self):
checked_types = (types.MethodType, types.FunctionType,
types.BuiltinFunctionType, types.BuiltinMethodType)
def _test_namespace(ns, *skips):
if isinstance(ns, object):
ns_name = ns.__class__.__name__
else:
ns_name = ns.__name__
skip_regexes = []
for r in skips:
if isinstance(r, str):
skip_regexes.append(re.compile(f'^{re.escape(r)}$'))
else:
skip_regexes.append(r)
for name in dir(ns):
if name.startswith('_'):
continue
if name in ['real', 'imag']:
y = torch.randn(1, dtype=torch.cfloat)
var = getattr(y, name)
elif name in ["H", "mT", "mH"]:
y = torch.randn(1, 1)
var = getattr(y, name)
else:
var = getattr(ns, name)
if not isinstance(var, checked_types):
continue
doc = var.__doc__
has_doc = doc is not None and len(doc.strip()) > 0
full_name = ns_name + '.' + name
if any(r.match(name) for r in skip_regexes):
self.assertFalse(has_doc,
f'New docs have been added for {full_name}, please remove '
'it from the skipped list in TestTorch.test_doc')
else:
self.assertTrue(has_doc, f'{full_name} is missing documentation')
# FIXME: All of the following should be marked as expected failures
# so that it is easier to tell when missing has been added.
# FIXME: fix all the skipped ones below!
test_namespace(torch.randn(1), # noqa: F821
'as_strided_',
re.compile('^clamp_(min|max)_?$'),
'is_distributed',
'is_nonzero',
'is_same_size',
'log_softmax',
'map2_',
'new',
'reinforce',
'relu',
'relu_',
'prelu',
'resize',
'resize_as',
'softmax',
'split_with_sizes',
'unsafe_split_with_sizes',
'_autocast_to_fp16',
'_autocast_to_fp32',
)
test_namespace(torch.nn) # noqa: F821
test_namespace(torch.nn.functional, 'assert_int_or_pair') # noqa: F821
# TODO: add torch.* tests when we have proper namespacing on ATen functions
# test_namespace(torch)
# FIXME: deprecate torch.Tensor constructor
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
from torch import inf, nan
from itertools import product, combinations, permutations
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_utils import (
TEST_WITH_TORCHINDUCTOR, TestCase, TEST_WITH_ROCM, run_tests,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, load_tests, skipIfTorchInductor, slowTest,
TEST_WITH_CROSSREF, skipIfTorchDynamo,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
skipIfNotRegistered, bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta,
PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN)
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types,
)
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
class TestTorch(TestCase):
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
import weakref
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
class TestTorch(TestCase):
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_torch.py
|
_test_namespace
|
def _test_namespace(ns, *skips):
if isinstance(ns, object):
ns_name = ns.__class__.__name__
else:
ns_name = ns.__name__
skip_regexes = []
for r in skips:
if isinstance(r, str):
skip_regexes.append(re.compile('^{}$'.format(re.escape(r))))
else:
skip_regexes.append(r)
for name in dir(ns):
if name.startswith('_'):
continue
if name in ['real', 'imag']:
y = torch.randn(1, dtype=torch.cfloat)
var = getattr(y, name)
elif name in ["H", "mT", "mH"]:
y = torch.randn(1, 1)
var = getattr(y, name)
else:
var = getattr(ns, name)
if not isinstance(var, checked_types):
continue
doc = var.__doc__
has_doc = doc is not None and len(doc.strip()) > 0
full_name = ns_name + '.' + name
if any(r.match(name) for r in skip_regexes):
self.assertFalse(has_doc,
'New docs have been added for {}, please remove '
'it from the skipped list in TestTorch.test_doc'.format(full_name))
else:
self.assertTrue(has_doc, '{} is missing documentation'.format(full_name))
# FIXME: All of the following should be marked as expected failures
# so that it is easier to tell when missing has been added.
# FIXME: fix all the skipped ones below!
test_namespace(torch.randn(1),
'as_strided_',
re.compile('^clamp_(min|max)_?$'),
'is_distributed',
'is_nonzero',
'is_same_size',
'log_softmax',
'map2_',
'new',
'reinforce',
'relu',
'relu_',
'prelu',
'resize',
'resize_as',
'softmax',
'split_with_sizes',
'unsafe_split_with_sizes',
'_autocast_to_fp16',
'_autocast_to_fp32',
)
test_namespace(torch.nn)
test_namespace(torch.nn.functional, 'assert_int_or_pair')
# TODO: add torch.* tests when we have proper namespacing on ATen functions
# test_namespace(torch)
# FIXME: deprecate torch.Tensor constructor
|
def _test_namespace(ns, *skips):
if isinstance(ns, object):
ns_name = ns.__class__.__name__
else:
ns_name = ns.__name__
skip_regexes = []
for r in skips:
if isinstance(r, str):
skip_regexes.append(re.compile(f'^{re.escape(r)}$'))
else:
skip_regexes.append(r)
for name in dir(ns):
if name.startswith('_'):
continue
if name in ['real', 'imag']:
y = torch.randn(1, dtype=torch.cfloat)
var = getattr(y, name)
elif name in ["H", "mT", "mH"]:
y = torch.randn(1, 1)
var = getattr(y, name)
else:
var = getattr(ns, name)
if not isinstance(var, checked_types):
continue
doc = var.__doc__
has_doc = doc is not None and len(doc.strip()) > 0
full_name = ns_name + '.' + name
if any(r.match(name) for r in skip_regexes):
self.assertFalse(has_doc,
f'New docs have been added for {full_name}, please remove '
'it from the skipped list in TestTorch.test_doc')
else:
self.assertTrue(has_doc, f'{full_name} is missing documentation')
# FIXME: All of the following should be marked as expected failures
# so that it is easier to tell when missing has been added.
# FIXME: fix all the skipped ones below!
test_namespace(torch.randn(1), # noqa: F821
'as_strided_',
re.compile('^clamp_(min|max)_?$'),
'is_distributed',
'is_nonzero',
'is_same_size',
'log_softmax',
'map2_',
'new',
'reinforce',
'relu',
'relu_',
'prelu',
'resize',
'resize_as',
'softmax',
'split_with_sizes',
'unsafe_split_with_sizes',
'_autocast_to_fp16',
'_autocast_to_fp32',
)
test_namespace(torch.nn) # noqa: F821
test_namespace(torch.nn.functional, 'assert_int_or_pair') # noqa: F821
# TODO: add torch.* tests when we have proper namespacing on ATen functions
# test_namespace(torch)
# FIXME: deprecate torch.Tensor constructor
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
from torch import inf, nan
from itertools import product, combinations, permutations
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_utils import (
TEST_WITH_TORCHINDUCTOR, TestCase, TEST_WITH_ROCM, run_tests,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, load_tests, skipIfTorchInductor, slowTest,
TEST_WITH_CROSSREF, skipIfTorchDynamo,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
skipIfNotRegistered, bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta,
PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN)
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types,
)
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
import weakref
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_torch.py
|
test_tensor_base_init
|
def test_tensor_base_init(self):
# Direct construction not OK
self.assertRaises(RuntimeError, lambda: torch._C._TensorBase())
# But construction of subclass is OK
class T(torch._C._TensorBase):
pass
T()
|
def test_tensor_base_init(self):
# Direct construction not OK
self.assertRaises(RuntimeError, lambda: torch._C.TensorBase())
        # Subclassing it directly is not OK
with self.assertRaisesRegex(RuntimeError, "Cannot subclass"):
class Tfail(torch._C.TensorBase):
pass
# Doing so with Tensor is ok though
class T(torch.Tensor):
pass
T()
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
from torch import inf, nan
from itertools import product, combinations, permutations
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_utils import (
TEST_WITH_TORCHINDUCTOR, TestCase, TEST_WITH_ROCM, run_tests,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, load_tests, skipIfTorchInductor, slowTest,
TEST_WITH_CROSSREF, skipIfTorchDynamo,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
skipIfNotRegistered, bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta,
PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN)
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types,
)
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
class TestTorch(TestCase):
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
import weakref
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
class TestTorch(TestCase):
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_torch.py
|
test_storage_base_init
|
# But construction of subclass is OK
class T(torch._C._TensorBase):
pass
T()
|
def test_storage_base_init(self):
# Direct construction not OK
self.assertRaises(RuntimeError, lambda: torch._C.StorageBase())
# But construction of subclass is OK
class T(torch._C.StorageBase):
pass
T()
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
class TestTorch(TestCase):
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/test_torch.py
|
test_tensor_base_new
|
def test_tensor_base_new(self):
# OK to call super().__new__, see
# https://github.com/pytorch/pytorch/issues/57421
class TestTensor(torch._C._TensorBase):
@staticmethod
def __new__(cls, x, *args, **kwargs):
return super().__new__(cls, x, *args, **kwargs)
x = torch.ones(5)
test_tensor = TestTensor(x)
|
def test_tensor_base_new(self):
# OK to call super().__new__, see
# https://github.com/pytorch/pytorch/issues/57421
class TestTensor(torch.Tensor):
@staticmethod
def __new__(cls, x, *args, **kwargs):
return super().__new__(cls, x, *args, **kwargs)
x = torch.ones(5)
test_tensor = TestTensor(x)
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
from torch import inf, nan
from itertools import product, combinations, permutations
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_utils import (
TEST_WITH_TORCHINDUCTOR, TestCase, TEST_WITH_ROCM, run_tests,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, load_tests, skipIfTorchInductor, slowTest,
TEST_WITH_CROSSREF, skipIfTorchDynamo,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
skipIfNotRegistered, bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta,
PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN)
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types,
)
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
class TestTorch(TestCase):
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
import weakref
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
class TestTorch(TestCase):
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_torch.py
|
test_storage_base_new
|
def test_storage_base_new(self):
# OK to call super().__new__, see
# https://github.com/pytorch/pytorch/issues/57421
class TestStorage(torch._C.StorageBase):
@staticmethod
def __new__(cls, x, *args, **kwargs):
return super().__new__(cls, x, *args, **kwargs)
x = torch.UntypedStorage(5)
test_storage = TestStorage(x)
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
class TestTorch(TestCase):
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_torch.py
|
_spawn_method
|
def _spawn_method(self, method, arg):
try:
mp.set_start_method('spawn')
except RuntimeError:
pass
with mp.Pool(1) as pool:
out: list = pool.map(method, [arg])
self.assertTrue(out[0])
|
def _spawn_method(self, method, arg):
try:
mp.set_start_method('spawn')
except RuntimeError:
pass
with mp.Pool(1) as pool:
out = pool.map(method, [arg])
self.assertTrue(out[0])
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
from torch import inf, nan
from itertools import product, combinations, permutations
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_utils import (
TEST_WITH_TORCHINDUCTOR, TestCase, TEST_WITH_ROCM, run_tests,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, load_tests, skipIfTorchInductor, slowTest,
TEST_WITH_CROSSREF, skipIfTorchDynamo,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
skipIfNotRegistered, bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta,
PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN)
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types,
)
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
import weakref
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_torch.py
|
test_deterministic_fill_uninitialized_memory
|
def test_deterministic_fill_uninitialized_memory(self):
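        # DeterministicGuard (from common_utils) is a context manager that temporarily overrides the
        # deterministic-mode settings and restores the previous values when the `with` block exits.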
with DeterministicGuard(True, fill_uninitialized_memory=False):
self.assertFalse(torch.utils.deterministic.fill_uninitialized_memory)
self.assertFalse(torch._C._get_deterministic_fill_uninitialized_memory())
with DeterministicGuard(True, fill_uninitialized_memory=True):
self.assertTrue(torch.utils.deterministic.fill_uninitialized_memory)
self.assertTrue(torch._C._get_deterministic_fill_uninitialized_memory())
self.assertFalse(torch.utils.deterministic.fill_uninitialized_memory)
self.assertFalse(torch._C._get_deterministic_fill_uninitialized_memory())
torch.utils.deterministic.fill_uninitialized_memory = False
self.assertFalse(torch.utils.deterministic.fill_uninitialized_memory)
self.assertFalse(torch._C._get_deterministic_fill_uninitialized_memory())
torch.utils.deterministic.fill_uninitialized_memory = True
self.assertTrue(torch.utils.deterministic.fill_uninitialized_memory)
self.assertTrue(torch._C._get_deterministic_fill_uninitialized_memory())
torch._C._set_deterministic_fill_uninitialized_memory(False)
self.assertFalse(torch.utils.deterministic.fill_uninitialized_memory)
self.assertFalse(torch._C._get_deterministic_fill_uninitialized_memory())
torch._C._set_deterministic_fill_uninitialized_memory(True)
self.assertTrue(torch.utils.deterministic.fill_uninitialized_memory)
self.assertTrue(torch._C._get_deterministic_fill_uninitialized_memory())
with self.assertRaisesRegex(RuntimeError, r"expected a bool, but got int"):
torch.utils.deterministic.fill_uninitialized_memory = 1
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
class TestTorch(TestCase):
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_torch.py
|
test_storage_dict_dealloc
|
def test_storage_dict_dealloc(self):
m, t = Tracker.make()
x = torch.UntypedStorage(2)
x.arf = t
del t
self.assertFalse(m[0])
del x
self.assertTrue(m[0])
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
class TestTorch(TestCase):
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_transformers.py
|
test_decoder_padding_and_src_mask_bool
|
def test_decoder_padding_and_src_mask_bool(self):
def transformer_decoder(inputs, input_seq_len, memory):
decoder_layer = nn.TransformerDecoderLayer(
d_model=16,
nhead=2,
dim_feedforward=32,
dropout=0.1,
activation='relu',
batch_first=True,
)
decoder_norm = nn.LayerNorm(16)
decoder = nn.TransformerDecoder(
decoder_layer, 2, decoder_norm
)
src_mask = torch.ones(
inputs.shape[1], inputs.shape[1], dtype=torch.bool
).triu_(diagonal=1)
padding_mask = (
torch.arange(inputs.shape[1])[None, :].cpu()
>= input_seq_len[:, None]
)
return decoder(
inputs,
memory,
tgt_mask=src_mask,
tgt_key_padding_mask=padding_mask,
memory_key_padding_mask=padding_mask,
)
inputs = torch.randn(2, 3, 16)
memory = torch.randn(2, 3, 16)
input_seq_len = torch.tensor([3, 2])
with self.assertNoLogs(None):
transformer_decoder(inputs, input_seq_len, memory)
|
import contextlib
from functools import partial
from collections import namedtuple
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import scaled_dot_product_attention
from torch.nn.attention import sdpa_kernel, SDPBackend
from torch.nn.attention.bias import CausalVariant, causal_lower_right, causal_upper_left
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
import itertools
import torch.optim as optim
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA, onlyCPU
from typing import List, Tuple, Optional, Dict
import torch.utils.cpp_extension
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
IS_FBCODE,
TEST_WITH_ROCM,
skipIfRocm,
skipIfTorchDynamo,
TEST_FAIRSEQ,
run_tests,
parametrize,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck,
make_tensor,
NOTEST_CPU,
IS_WINDOWS,
TEST_WITH_TORCHDYNAMO,
TEST_XPU,
)
from torch._dynamo.testing import CompileCounterWithBackend
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import (
IS_JETSON, SM80OrLater, PLATFORM_SUPPORTS_FLASH_ATTENTION,
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
PLATFORM_SUPPORTS_FUSED_ATTENTION,
PLATFORM_SUPPORTS_CUDNN_ATTENTION,
SM90OrLater,
tf32_on_and_off
)
from test_cpp_extensions_open_device_registration import (
remove_build_path,
generate_faked_module
)
import fairseq.models.transformer as fairseq_transformer
SdpaShape = namedtuple('Sdpa_Shape', ['batch', 'num_heads', 'seq_len', 'head_dim'])
Tolerances = namedtuple('Tolerances', ['atol', 'rtol'])
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM8XDevice = torch.cuda.is_available() and torch.cuda.get_device_capability() in [(8, 6), (8, 7), (8, 9)]
isSM90Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (9, 0)
isSM5xDevice = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] == 5
isLessThanSM80Device = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 8
PLATFORM_SPECIFIC_SDPA = get_platform_specific_sdpa()
MEM_EFF_CAPABILITY_MATCHES_SM80 = SM80OrLater or TEST_WITH_ROCM
class TestTransformers(NNTestCase):
from torch.nn.attention.bias import _calculate_scale
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_transformers.py
|
transformer_decoder
|
def transformer_decoder(inputs, input_seq_len, memory):
decoder_layer = nn.TransformerDecoderLayer(
d_model=16,
nhead=2,
dim_feedforward=32,
dropout=0.1,
activation='relu',
batch_first=True,
)
decoder_norm = nn.LayerNorm(16)
decoder = nn.TransformerDecoder(
decoder_layer, 2, decoder_norm
)
src_mask = torch.ones(
inputs.shape[1], inputs.shape[1], dtype=torch.bool
).triu_(diagonal=1)
padding_mask = (
torch.arange(inputs.shape[1])[None, :].cpu()
>= input_seq_len[:, None]
)
return decoder(
inputs,
memory,
tgt_mask=src_mask,
tgt_key_padding_mask=padding_mask,
memory_key_padding_mask=padding_mask,
)
inputs = torch.randn(2, 3, 16)
memory = torch.randn(2, 3, 16)
input_seq_len = torch.tensor([3, 2])
with self.assertNoLogs(None):
transformer_decoder(inputs, input_seq_len, memory)
|
import contextlib
from functools import partial
from collections import namedtuple
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import scaled_dot_product_attention
from torch.nn.attention import sdpa_kernel, SDPBackend
from torch.nn.attention.bias import CausalVariant, causal_lower_right, causal_upper_left
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
import itertools
import torch.optim as optim
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA, onlyCPU
from typing import List, Tuple, Optional, Dict
import torch.utils.cpp_extension
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
IS_FBCODE,
TEST_WITH_ROCM,
skipIfRocm,
skipIfTorchDynamo,
TEST_FAIRSEQ,
run_tests,
parametrize,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck,
make_tensor,
NOTEST_CPU,
IS_WINDOWS,
TEST_WITH_TORCHDYNAMO,
TEST_XPU,
)
from torch._dynamo.testing import CompileCounterWithBackend
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import (
IS_JETSON, SM80OrLater, PLATFORM_SUPPORTS_FLASH_ATTENTION,
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
PLATFORM_SUPPORTS_FUSED_ATTENTION,
PLATFORM_SUPPORTS_CUDNN_ATTENTION,
SM90OrLater,
tf32_on_and_off
)
from test_cpp_extensions_open_device_registration import (
remove_build_path,
generate_faked_module
)
import fairseq.models.transformer as fairseq_transformer
SdpaShape = namedtuple('Sdpa_Shape', ['batch', 'num_heads', 'seq_len', 'head_dim'])
Tolerances = namedtuple('Tolerances', ['atol', 'rtol'])
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM8XDevice = torch.cuda.is_available() and torch.cuda.get_device_capability() in [(8, 6), (8, 7), (8, 9)]
isSM90Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (9, 0)
isSM5xDevice = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] == 5
isLessThanSM80Device = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 8
PLATFORM_SPECIFIC_SDPA = get_platform_specific_sdpa()
MEM_EFF_CAPABILITY_MATCHES_SM80 = SM80OrLater or TEST_WITH_ROCM
from torch.nn.attention.bias import _calculate_scale
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_transformers.py
|
test_mha_native_args
|
def test_mha_native_args(self, nb_heads, bias):
B, L, F = 8, 100, 128
batch_first = True
fast_path = True
use_pad_mask = (bias % 2) == 1
mha = nn.MultiheadAttention(
embed_dim=F,
num_heads=nb_heads,
batch_first=batch_first,
bias=bias
).cuda()
mha.eval()
ctx = torch.no_grad if fast_path else contextlib.nullcontext
with ctx():
x = torch.randn(B, L, F).cuda()
if not batch_first:
x = x.transpose(0, 1)
pad_mask = None
if use_pad_mask:
pad_mask = torch.zeros((B, L), dtype=torch.bool).cuda()
mha(query=x, key=x, value=x, key_padding_mask=pad_mask)
|
import contextlib
from functools import partial
from collections import namedtuple
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import scaled_dot_product_attention
from torch.nn.attention import sdpa_kernel, SDPBackend
from torch.nn.attention.bias import CausalVariant, causal_lower_right, causal_upper_left
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
import itertools
import torch.optim as optim
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA, onlyCPU
from typing import List, Tuple, Optional, Dict
import torch.utils.cpp_extension
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
IS_FBCODE,
TEST_WITH_ROCM,
skipIfRocm,
skipIfTorchDynamo,
TEST_FAIRSEQ,
run_tests,
parametrize,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck,
make_tensor,
NOTEST_CPU,
IS_WINDOWS,
TEST_WITH_TORCHDYNAMO,
TEST_XPU,
)
from torch._dynamo.testing import CompileCounterWithBackend
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import (
IS_JETSON, SM80OrLater, PLATFORM_SUPPORTS_FLASH_ATTENTION,
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
PLATFORM_SUPPORTS_FUSED_ATTENTION,
PLATFORM_SUPPORTS_CUDNN_ATTENTION,
SM90OrLater,
tf32_on_and_off
)
from test_cpp_extensions_open_device_registration import (
remove_build_path,
generate_faked_module
)
import fairseq.models.transformer as fairseq_transformer
SdpaShape = namedtuple('Sdpa_Shape', ['batch', 'num_heads', 'seq_len', 'head_dim'])
Tolerances = namedtuple('Tolerances', ['atol', 'rtol'])
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM8XDevice = torch.cuda.is_available() and torch.cuda.get_device_capability() in [(8, 6), (8, 7), (8, 9)]
isSM90Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (9, 0)
isSM5xDevice = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] == 5
isLessThanSM80Device = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 8
PLATFORM_SPECIFIC_SDPA = get_platform_specific_sdpa()
MEM_EFF_CAPABILITY_MATCHES_SM80 = SM80OrLater or TEST_WITH_ROCM
class TestTransformers(NNTestCase):
from torch.nn.attention.bias import _calculate_scale
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_transformers.py
|
test_with_nested_tensor_input
|
def test_with_nested_tensor_input(self, device):
encoder_layer = nn.TransformerEncoderLayer(
d_model=256,
nhead=4,
dim_feedforward=512,
activation='gelu',
norm_first=False,
batch_first=True,
)
transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=3, enable_nested_tensor=True).to(device)
transformer_encoder.eval()
with torch.no_grad():
x = torch.randn(6, 10, 256).to(device)
mask = torch.ones(6, 10)
            mask[0, 0:] = 0  # mask out every position in row 0
            mask[2, 2:] = 0  # mask out the trailing 8 positions in row 2
            mask[4, 4:] = 0  # mask out the trailing 6 positions in row 4
            mask[5, 8:] = 0  # mask out the trailing 2 positions in row 5
mask = mask.bool().to(device)
x = torch._nested_tensor_from_mask(x, mask.logical_not(), mask_check=False)
out = transformer_encoder(src=x, src_key_padding_mask=None)
self.assertEqual(out.is_nested, True)
|
import contextlib
from functools import partial
from collections import namedtuple
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import scaled_dot_product_attention
from torch.nn.attention import sdpa_kernel, SDPBackend
from torch.nn.attention.bias import CausalVariant, causal_lower_right, causal_upper_left
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
import itertools
import torch.optim as optim
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA, onlyCPU
from typing import List, Tuple, Optional, Dict
import torch.utils.cpp_extension
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
IS_FBCODE,
TEST_WITH_ROCM,
skipIfRocm,
skipIfTorchDynamo,
TEST_FAIRSEQ,
run_tests,
parametrize,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck,
make_tensor,
NOTEST_CPU,
IS_WINDOWS,
TEST_WITH_TORCHDYNAMO,
TEST_XPU,
)
from torch._dynamo.testing import CompileCounterWithBackend
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import (
IS_JETSON, SM80OrLater, PLATFORM_SUPPORTS_FLASH_ATTENTION,
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
PLATFORM_SUPPORTS_FUSED_ATTENTION,
PLATFORM_SUPPORTS_CUDNN_ATTENTION,
SM90OrLater,
tf32_on_and_off
)
from test_cpp_extensions_open_device_registration import (
remove_build_path,
generate_faked_module
)
import fairseq.models.transformer as fairseq_transformer
SdpaShape = namedtuple('Sdpa_Shape', ['batch', 'num_heads', 'seq_len', 'head_dim'])
Tolerances = namedtuple('Tolerances', ['atol', 'rtol'])
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM8XDevice = torch.cuda.is_available() and torch.cuda.get_device_capability() in [(8, 6), (8, 7), (8, 9)]
isSM90Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (9, 0)
isSM5xDevice = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] == 5
isLessThanSM80Device = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 8
PLATFORM_SPECIFIC_SDPA = get_platform_specific_sdpa()
MEM_EFF_CAPABILITY_MATCHES_SM80 = SM80OrLater or TEST_WITH_ROCM
class TestTransformers(NNTestCase):
from torch.nn.attention.bias import _calculate_scale
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_transformers.py
|
test_script_encoder_subclass
|
def test_script_encoder_subclass(self, device):
class MyCustomLayer(nn.TransformerEncoderLayer):
pass
encoder = nn.TransformerEncoder(
MyCustomLayer(d_model=256, nhead=8), num_layers=6
).to(device=device)
torch.jit.script(encoder)
# brazenly adapted from test_transformerencoderlayer_src_mask to test execution of
# torchscripted transformerencoderlayer subclass
|
import contextlib
from functools import partial
from collections import namedtuple
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import scaled_dot_product_attention
from torch.nn.attention import sdpa_kernel, SDPBackend
from torch.nn.attention.bias import CausalVariant, causal_lower_right, causal_upper_left
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
import itertools
import torch.optim as optim
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA, onlyCPU
from typing import List, Tuple, Optional, Dict
import torch.utils.cpp_extension
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
IS_FBCODE,
TEST_WITH_ROCM,
skipIfRocm,
skipIfTorchDynamo,
TEST_FAIRSEQ,
run_tests,
parametrize,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck,
make_tensor,
NOTEST_CPU,
IS_WINDOWS,
TEST_WITH_TORCHDYNAMO,
TEST_XPU,
)
from torch._dynamo.testing import CompileCounterWithBackend
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import (
IS_JETSON, SM80OrLater, PLATFORM_SUPPORTS_FLASH_ATTENTION,
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
PLATFORM_SUPPORTS_FUSED_ATTENTION,
PLATFORM_SUPPORTS_CUDNN_ATTENTION,
SM90OrLater,
tf32_on_and_off
)
from test_cpp_extensions_open_device_registration import (
remove_build_path,
generate_faked_module
)
import fairseq.models.transformer as fairseq_transformer
SdpaShape = namedtuple('Sdpa_Shape', ['batch', 'num_heads', 'seq_len', 'head_dim'])
Tolerances = namedtuple('Tolerances', ['atol', 'rtol'])
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM8XDevice = torch.cuda.is_available() and torch.cuda.get_device_capability() in [(8, 6), (8, 7), (8, 9)]
isSM90Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (9, 0)
isSM5xDevice = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] == 5
isLessThanSM80Device = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 8
PLATFORM_SPECIFIC_SDPA = get_platform_specific_sdpa()
MEM_EFF_CAPABILITY_MATCHES_SM80 = SM80OrLater or TEST_WITH_ROCM
class TestTransformers(NNTestCase):
from torch.nn.attention.bias import _calculate_scale
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_transformers.py
|
test_transformerencoderlayer_subclass
|
def test_transformerencoderlayer_subclass(self, device):
class MyCustomLayer(nn.TransformerEncoderLayer):
pass
nhead = 4
batch_size = 2
seqlen = 4
d_model = 8
dim_feedforward = 32
model = MyCustomLayer(
d_model=d_model,
nhead=nhead,
dim_feedforward=dim_feedforward,
batch_first=True).to(device)
script_model = torch.jit.script(model)
src = torch.rand(batch_size, seqlen, d_model).to(device) # bs, seqlen, d_model
src_mask = torch.zeros(seqlen, seqlen).to(torch.bool).to(device)
torch.manual_seed(42)
result = model(src, src_mask=src_mask)
torch.manual_seed(42)
scripted_result = script_model(src, src_mask=src_mask)
self.assertEqual(result, scripted_result)
model.eval()
script_model = torch.jit.script(model)
with torch.no_grad():
result = model(src, src_mask=src_mask)
scripted_result = script_model(src, src_mask=src_mask)
self.assertEqual(result, scripted_result)
|
import contextlib
from functools import partial
from collections import namedtuple
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import scaled_dot_product_attention
from torch.nn.attention import sdpa_kernel, SDPBackend
from torch.nn.attention.bias import CausalVariant, causal_lower_right, causal_upper_left
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
import itertools
import torch.optim as optim
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA, onlyCPU
from typing import List, Tuple, Optional, Dict
import torch.utils.cpp_extension
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
IS_FBCODE,
TEST_WITH_ROCM,
skipIfRocm,
skipIfTorchDynamo,
TEST_FAIRSEQ,
run_tests,
parametrize,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck,
make_tensor,
NOTEST_CPU,
IS_WINDOWS,
TEST_WITH_TORCHDYNAMO,
TEST_XPU,
)
from torch._dynamo.testing import CompileCounterWithBackend
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import (
IS_JETSON, SM80OrLater, PLATFORM_SUPPORTS_FLASH_ATTENTION,
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
PLATFORM_SUPPORTS_FUSED_ATTENTION,
PLATFORM_SUPPORTS_CUDNN_ATTENTION,
SM90OrLater,
tf32_on_and_off
)
from test_cpp_extensions_open_device_registration import (
remove_build_path,
generate_faked_module
)
import fairseq.models.transformer as fairseq_transformer
SdpaShape = namedtuple('Sdpa_Shape', ['batch', 'num_heads', 'seq_len', 'head_dim'])
Tolerances = namedtuple('Tolerances', ['atol', 'rtol'])
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM8XDevice = torch.cuda.is_available() and torch.cuda.get_device_capability() in [(8, 6), (8, 7), (8, 9)]
isSM90Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (9, 0)
isSM5xDevice = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] == 5
isLessThanSM80Device = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 8
PLATFORM_SPECIFIC_SDPA = get_platform_specific_sdpa()
MEM_EFF_CAPABILITY_MATCHES_SM80 = SM80OrLater or TEST_WITH_ROCM
class TestTransformers(NNTestCase):
from torch.nn.attention.bias import _calculate_scale
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_transformers.py
|
torch_to_fairseq
|
def torch_to_fairseq(torch_encoder, fairseq_encoder):
for src_layer, dst_layer in zip(torch_encoder.layers, fairseq_encoder.layers):
w_q, w_k, w_v = src_layer.self_attn.in_proj_weight.chunk(3, dim=0)
b_q, b_k, b_v = src_layer.self_attn.in_proj_bias.chunk(3, dim=0)
dst_layer.self_attn.q_proj.weight = torch.nn.Parameter(w_q)
dst_layer.self_attn.q_proj.bias = torch.nn.Parameter(b_q)
dst_layer.self_attn.k_proj.weight = torch.nn.Parameter(w_k)
dst_layer.self_attn.k_proj.bias = torch.nn.Parameter(b_k)
dst_layer.self_attn.v_proj.weight = torch.nn.Parameter(w_v)
dst_layer.self_attn.v_proj.bias = torch.nn.Parameter(b_v)
dst_layer.self_attn.out_proj.weight = src_layer.self_attn.out_proj.weight
dst_layer.self_attn.out_proj.bias = src_layer.self_attn.out_proj.bias
dst_layer.fc1.weight = src_layer.linear1.weight
dst_layer.fc1.bias = src_layer.linear1.bias
# fairseq may use fusedlayernorm from nvidia apex - diff properties
dst_layer.self_attn_layer_norm.load_state_dict(src_layer.norm1.state_dict())
dst_layer.fc2.weight = src_layer.linear2.weight
dst_layer.fc2.bias = src_layer.linear2.bias
dst_layer.final_layer_norm.load_state_dict(src_layer.norm2.state_dict())
return fairseq_encoder
|
import contextlib
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
from torch.backends.cuda import sdp_kernel, SDPBackend
import torch.optim as optim
from torch.testing._internal.common_dtype import floating_types_and_half
from typing import Tuple
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
TEST_FAIRSEQ,
run_tests,
parametrize,
instantiate_parametrized_tests,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck
)
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import TEST_CUDA, SM80OrLater, PLATFORM_SUPPORTS_FUSED_SDPA
import fairseq.models.transformer as fairseq_transformer
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM86Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (8, 6)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_transformers.py
|
rand_tensor
|
def rand_tensor(*shape):
return torch.randn(shape, device=device, dtype=dtype)
# This test compares python and C++ implementations of SDP.
N, N_prime, L, S, E = 5, 2, 4, 3, 6
if input_dim == 3:
query = rand_tensor(N, L, E)
key = rand_tensor(N, S, E)
value = rand_tensor(N, S, E)
elif input_dim == 4:
query = rand_tensor(N, N_prime, L, E)
key = rand_tensor(N, N_prime, S, E)
value = rand_tensor(N, N_prime, S, E)
else:
self.fail(f'Invalid input_dim {input_dim} encountered in SDP test')
attn_mask = None
if attn_mask_dim is not None:
assert attn_mask_dim in [2, input_dim]
mask_size = (L, S) if attn_mask_dim == 2 else ((N, L, S) if input_dim == 3 else (N, N_prime, L, S))
attn_mask = (torch.ones(mask_size, device=device, dtype=torch.bool).tril() if is_causal
else torch.randint(0, 2, size=mask_size, device=device, dtype=torch.bool))
with freeze_rng_state():
# Python impl only supports float mask and 3D inputs.
attn_mask_float = attn_mask
if attn_mask_float is not None:
attn_mask_float = torch.zeros_like(attn_mask, dtype=query.dtype)
attn_mask_float.masked_fill_(attn_mask.logical_not(), float("-inf"))
q, k, v = query.view(-1, L, E), key.view(-1, S, E), value.view(-1, S, E)
a = attn_mask_float
if a is not None and attn_mask_dim > 3:
a = a.view(-1, L, S)
expected = sdp_ref(q, k, v, attn_mask=a, dropout_p=dropout_p)
if input_dim > 3:
expected = expected.view(-1, N_prime, L, E)
with freeze_rng_state():
if is_causal:
# NB: Don't pass attn_mask here
actual = torch.nn.functional.scaled_dot_product_attention(
query, key, value, None, dropout_p, is_causal)
# Error case: both explicit attn_mask and is_causal are set
with self.assertRaisesRegex(RuntimeError,
"Explicit attn_mask should not be set when is_causal=True"):
torch.nn.functional.scaled_dot_product_attention(
query, key, value, attn_mask, dropout_p, is_causal)
else:
actual = torch.nn.functional.scaled_dot_product_attention(
query, key, value, attn_mask, dropout_p, is_causal)
self.assertEqual(actual, expected)
|
def rand_tensor(*shape):
return torch.randn(shape, device=device, dtype=dtype)
# This test compares python and C++ implementations of SDP.
N, N_prime, L, S, E = 5, 2, 4, 3, 6
if input_dim == 3:
query = rand_tensor(N, L, E)
key = rand_tensor(N, S, E)
value = rand_tensor(N, S, E)
elif input_dim == 4:
query = rand_tensor(N, N_prime, L, E)
key = rand_tensor(N, N_prime, S, E)
value = rand_tensor(N, N_prime, S, E)
else:
self.fail(f'Invalid input_dim {input_dim} encountered in SDP test')
attn_mask = None
if attn_mask_dim is not None:
assert attn_mask_dim in [2, input_dim]
mask_size = (L, S) if attn_mask_dim == 2 else ((N, L, S) if input_dim == 3 else (N, N_prime, L, S))
attn_mask = (torch.ones(mask_size, device=device, dtype=torch.bool).tril() if is_causal
else torch.randint(0, 2, size=mask_size, device=device, dtype=torch.bool))
with freeze_rng_state():
# Python impl only supports float mask and 3D inputs.
attn_mask_float = attn_mask
if attn_mask_float is not None:
attn_mask_float = torch.zeros_like(attn_mask, dtype=query.dtype)
attn_mask_float.masked_fill_(attn_mask.logical_not(), float("-inf"))
q, k, v = query.view(-1, L, E), key.view(-1, S, E), value.view(-1, S, E)
a = attn_mask_float
if a is not None and attn_mask_dim > 3:
a = a.view(-1, L, S)
expected = sdp_ref(q, k, v, attn_mask=a, dropout_p=dropout_p)
if input_dim > 3:
expected = expected.view(-1, N_prime, L, E)
with freeze_rng_state():
if is_causal:
# NB: Don't pass attn_mask here
actual = torch.nn.functional.scaled_dot_product_attention(
query, key, value, None, dropout_p, is_causal)
# Error case: both explicit attn_mask and is_causal are set
with self.assertRaisesRegex(RuntimeError,
"Explicit attn_mask should not be set when is_causal=True"):
torch.nn.functional.scaled_dot_product_attention(
query, key, value, attn_mask, dropout_p, is_causal)
else:
actual = torch.nn.functional.scaled_dot_product_attention(
query, key, value, attn_mask, dropout_p, is_causal)
        # This tests the fully masked-out rows case
if torch.isnan(expected).any():
row_sums = attn_mask.sum(dim=-1)
masked_out_rows = (row_sums == 0)
for _ in range((input_dim - attn_mask_dim) - 1):
masked_out_rows = masked_out_rows.unsqueeze(0)
masked_out_rows = masked_out_rows.expand(expected.shape[:-1])
# Slice out the fully masked rows from expected and actual
expected_masked_out = expected[masked_out_rows]
actual_masked_out = actual[masked_out_rows]
expected_all_nan = torch.isnan(expected_masked_out).all()
actual_all_zero = (actual_masked_out.abs().sum() == 0)
self.assertTrue(expected_all_nan)
self.assertTrue(actual_all_zero)
return
self.assertEqual(actual, expected)
|
import contextlib
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
from torch.backends.cuda import sdp_kernel, SDPBackend
import torch.optim as optim
from torch.testing._internal.common_dtype import floating_types_and_half
from typing import Tuple
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
TEST_FAIRSEQ,
run_tests,
parametrize,
instantiate_parametrized_tests,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck
)
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import TEST_CUDA, SM80OrLater, PLATFORM_SUPPORTS_FUSED_SDPA
import fairseq.models.transformer as fairseq_transformer
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM86Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (8, 6)
class TestSDPA(NNTestCase):
|
import contextlib
from functools import partial
from collections import namedtuple
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import scaled_dot_product_attention
from torch.nn.attention import sdpa_kernel, SDPBackend
from torch.nn.attention.bias import CausalVariant, causal_lower_right, causal_upper_left
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
import itertools
import torch.optim as optim
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA, onlyCPU
from typing import List, Tuple, Optional, Dict
import torch.utils.cpp_extension
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
IS_FBCODE,
TEST_WITH_ROCM,
skipIfRocm,
skipIfTorchDynamo,
TEST_FAIRSEQ,
run_tests,
parametrize,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck,
make_tensor,
NOTEST_CPU,
IS_WINDOWS,
TEST_WITH_TORCHDYNAMO,
TEST_XPU,
)
from torch._dynamo.testing import CompileCounterWithBackend
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import (
IS_JETSON, SM80OrLater, PLATFORM_SUPPORTS_FLASH_ATTENTION,
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
PLATFORM_SUPPORTS_FUSED_ATTENTION,
PLATFORM_SUPPORTS_CUDNN_ATTENTION,
SM90OrLater,
tf32_on_and_off
)
from test_cpp_extensions_open_device_registration import (
remove_build_path,
generate_faked_module
)
import fairseq.models.transformer as fairseq_transformer
SdpaShape = namedtuple('Sdpa_Shape', ['batch', 'num_heads', 'seq_len', 'head_dim'])
Tolerances = namedtuple('Tolerances', ['atol', 'rtol'])
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM8XDevice = torch.cuda.is_available() and torch.cuda.get_device_capability() in [(8, 6), (8, 7), (8, 9)]
isSM90Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (9, 0)
isSM5xDevice = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] == 5
isLessThanSM80Device = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 8
PLATFORM_SPECIFIC_SDPA = get_platform_specific_sdpa()
MEM_EFF_CAPABILITY_MATCHES_SM80 = SM80OrLater or TEST_WITH_ROCM
from torch.nn.attention.bias import _calculate_scale
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_transformers.py
|
ones_tensor
|
def ones_tensor(*shape):
return torch.ones(shape, device=device, dtype=torch.float32).to(device)
S, L, E, H = 1, 2, 4, 1
qkv = ones_tensor(S, L, E)
mha = nn.MultiheadAttention(E, H).to(device)
mha.in_proj_weight = Parameter(torch.ones((E * 3, E), device=device))
mha.out_proj.weight = Parameter(torch.ones((E, E), device=device))
expected = torch.ones(size=(S, L, E)).to(device) * 16
for kernel in kernels:
with torch.backends.cuda.sdp_kernel(
enable_math=(kernel == 'math'),
enable_flash=(kernel == 'flash'),
enable_mem_efficient=(kernel == 'meff')
):
actual, _ = mha(qkv, qkv, qkv, need_weights=False, is_causal=True)
self.assertTrue(torch.equal(actual, expected))
if kernel != 'math':
# fails with embedding size not multiple of 4
with self.assertRaisesRegex(RuntimeError, "No available kernel"):
qkv_f, mha_f = ones_tensor(S, L, 2), nn.MultiheadAttention(2, H).to(device)
_ = mha_f(qkv_f, qkv_f, qkv_f, need_weights=False, is_causal=True)
torch.cuda.synchronize()
|
def ones_tensor(*shape):
return torch.ones(shape, dtype=torch.float32)
S, L, E, H = 1, 2, 4, 1
qkv = ones_tensor(S, L, E)
mha = nn.MultiheadAttention(E, H)
mha.in_proj_weight = Parameter(torch.ones((E * 3, E)))
mha.out_proj.weight = Parameter(torch.ones((E, E)))
qkv = qkv.to(float)
kpm = ones_tensor(S, L) * float("-inf")
am = ones_tensor(L, L).to(bool)
|
import contextlib
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
from torch.backends.cuda import sdp_kernel, SDPBackend
import torch.optim as optim
from torch.testing._internal.common_dtype import floating_types_and_half
from typing import Tuple
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
TEST_FAIRSEQ,
run_tests,
parametrize,
instantiate_parametrized_tests,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck
)
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import TEST_CUDA, SM80OrLater, PLATFORM_SUPPORTS_FUSED_SDPA
import fairseq.models.transformer as fairseq_transformer
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM86Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (8, 6)
|
import contextlib
from functools import partial
from collections import namedtuple
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import scaled_dot_product_attention
from torch.nn.attention import sdpa_kernel, SDPBackend
from torch.nn.attention.bias import CausalVariant, causal_lower_right, causal_upper_left
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
import itertools
import torch.optim as optim
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA, onlyCPU
from typing import List, Tuple, Optional, Dict
import torch.utils.cpp_extension
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
IS_FBCODE,
TEST_WITH_ROCM,
skipIfRocm,
skipIfTorchDynamo,
TEST_FAIRSEQ,
run_tests,
parametrize,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck,
make_tensor,
NOTEST_CPU,
IS_WINDOWS,
TEST_WITH_TORCHDYNAMO,
TEST_XPU,
)
from torch._dynamo.testing import CompileCounterWithBackend
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import (
IS_JETSON, SM80OrLater, PLATFORM_SUPPORTS_FLASH_ATTENTION,
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
PLATFORM_SUPPORTS_FUSED_ATTENTION,
PLATFORM_SUPPORTS_CUDNN_ATTENTION,
SM90OrLater,
tf32_on_and_off
)
from test_cpp_extensions_open_device_registration import (
remove_build_path,
generate_faked_module
)
import fairseq.models.transformer as fairseq_transformer
SdpaShape = namedtuple('Sdpa_Shape', ['batch', 'num_heads', 'seq_len', 'head_dim'])
Tolerances = namedtuple('Tolerances', ['atol', 'rtol'])
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM8XDevice = torch.cuda.is_available() and torch.cuda.get_device_capability() in [(8, 6), (8, 7), (8, 9)]
isSM90Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (9, 0)
isSM5xDevice = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] == 5
isLessThanSM80Device = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 8
PLATFORM_SPECIFIC_SDPA = get_platform_specific_sdpa()
MEM_EFF_CAPABILITY_MATCHES_SM80 = SM80OrLater or TEST_WITH_ROCM
from torch.nn.attention.bias import _calculate_scale
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_transformers.py
|
_test_te_fastpath_called
|
def _test_te_fastpath_called(model, args, kwargs=None, return_value=None, is_called=True):
if kwargs is None:
kwargs = {}
with patch('torch._transformer_encoder_layer_fwd') as fastpath_mock:
fastpath_mock.return_value = return_value
output = model(*args, **kwargs)
self.assertTrue(fastpath_mock.called == is_called)
|
import contextlib
from functools import partial
from collections import namedtuple
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import scaled_dot_product_attention
from torch.nn.attention import sdpa_kernel, SDPBackend
from torch.nn.attention.bias import CausalVariant, causal_lower_right, causal_upper_left
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
import itertools
import torch.optim as optim
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA, onlyCPU
from typing import List, Tuple, Optional, Dict
import torch.utils.cpp_extension
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
IS_FBCODE,
TEST_WITH_ROCM,
skipIfRocm,
skipIfTorchDynamo,
TEST_FAIRSEQ,
run_tests,
parametrize,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck,
make_tensor,
NOTEST_CPU,
IS_WINDOWS,
TEST_WITH_TORCHDYNAMO,
TEST_XPU,
)
from torch._dynamo.testing import CompileCounterWithBackend
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import (
IS_JETSON, SM80OrLater, PLATFORM_SUPPORTS_FLASH_ATTENTION,
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
PLATFORM_SUPPORTS_FUSED_ATTENTION,
PLATFORM_SUPPORTS_CUDNN_ATTENTION,
SM90OrLater,
tf32_on_and_off
)
from test_cpp_extensions_open_device_registration import (
remove_build_path,
generate_faked_module
)
import fairseq.models.transformer as fairseq_transformer
SdpaShape = namedtuple('Sdpa_Shape', ['batch', 'num_heads', 'seq_len', 'head_dim'])
Tolerances = namedtuple('Tolerances', ['atol', 'rtol'])
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM8XDevice = torch.cuda.is_available() and torch.cuda.get_device_capability() in [(8, 6), (8, 7), (8, 9)]
isSM90Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (9, 0)
isSM5xDevice = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] == 5
isLessThanSM80Device = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 8
PLATFORM_SPECIFIC_SDPA = get_platform_specific_sdpa()
MEM_EFF_CAPABILITY_MATCHES_SM80 = SM80OrLater or TEST_WITH_ROCM
from torch.nn.attention.bias import _calculate_scale
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_transformers.py
|
test_transformer_bias_is_none
|
def test_transformer_bias_is_none(self, device):
batch_size = 2
seqlen = 3
d_model = 8
nhead = 4
encoder_layer = torch.nn.TransformerEncoderLayer(d_model, nhead, bias=False, batch_first=True, device=device)
encoder_layer.eval()
x = torch.randn(batch_size, seqlen, d_model, device=device)
# runs without error
encoder_layer(x)
with self.assertWarnsRegex(UserWarning, "encoder_layer.self_attn was passed bias=False"):
encoder = torch.nn.TransformerEncoder(encoder_layer, num_layers=1).eval()
encoder(x)
with self.assertWarnsRegex(UserWarning, "self_attn was passed bias=False"):
transformer = torch.nn.Transformer(
d_model=d_model, nhead=nhead, bias=False, batch_first=True, device=device
).eval()
transformer(x, x)
|
import contextlib
from functools import partial
from collections import namedtuple
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import scaled_dot_product_attention
from torch.nn.attention import sdpa_kernel, SDPBackend
from torch.nn.attention.bias import CausalVariant, causal_lower_right, causal_upper_left
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
import itertools
import torch.optim as optim
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA, onlyCPU
from typing import List, Tuple, Optional, Dict
import torch.utils.cpp_extension
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
IS_FBCODE,
TEST_WITH_ROCM,
skipIfRocm,
skipIfTorchDynamo,
TEST_FAIRSEQ,
run_tests,
parametrize,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck,
make_tensor,
NOTEST_CPU,
IS_WINDOWS,
TEST_WITH_TORCHDYNAMO,
TEST_XPU,
)
from torch._dynamo.testing import CompileCounterWithBackend
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import (
IS_JETSON, SM80OrLater, PLATFORM_SUPPORTS_FLASH_ATTENTION,
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
PLATFORM_SUPPORTS_FUSED_ATTENTION,
PLATFORM_SUPPORTS_CUDNN_ATTENTION,
SM90OrLater,
tf32_on_and_off
)
from test_cpp_extensions_open_device_registration import (
remove_build_path,
generate_faked_module
)
import fairseq.models.transformer as fairseq_transformer
SdpaShape = namedtuple('Sdpa_Shape', ['batch', 'num_heads', 'seq_len', 'head_dim'])
Tolerances = namedtuple('Tolerances', ['atol', 'rtol'])
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM8XDevice = torch.cuda.is_available() and torch.cuda.get_device_capability() in [(8, 6), (8, 7), (8, 9)]
isSM90Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (9, 0)
isSM5xDevice = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] == 5
isLessThanSM80Device = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 8
PLATFORM_SPECIFIC_SDPA = get_platform_specific_sdpa()
MEM_EFF_CAPABILITY_MATCHES_SM80 = SM80OrLater or TEST_WITH_ROCM
class TestTransformers(NNTestCase):
from torch.nn.attention.bias import _calculate_scale
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_transformers.py
|
test_train_with_is_causal
|
def test_train_with_is_causal(self, device):
# training with is_causal
S, L, E, H = 1, 2, 2, 1
layer = nn.TransformerEncoderLayer(
d_model=2,
dim_feedforward=4,
nhead=H,
batch_first=True,
activation="gelu",
dropout=0,
)
criterion = nn.MSELoss()
encoder = nn.TransformerEncoder(layer, 2).to(device)
optimizer = optim.SGD(encoder.parameters(), lr=0.1, momentum=0.9)
encoder.train()
encoder.train()
optimizer.zero_grad()
inputs = torch.randn(S, L, E).to(device)
outputs = encoder(inputs, is_causal=True)
loss = criterion(outputs[:, 0:2, :], inputs[:, 0:2, :])
loss.backward()
optimizer.step()
# inference with is_causal
t_qvk = torch.randn((S, L, E), device=device, dtype=torch.float32)
mha = nn.MultiheadAttention(E, H).to(device)
attn_out, _ = mha(t_qvk, t_qvk, t_qvk, is_causal=True)
# Can't give both attn_mask AND is_causal
attn_mask = torch.randint(0, 2, size=(L, L), device=device, dtype=torch.bool)
with self.assertRaisesRegex(AssertionError, "Only allow causal mask or attn_mask"):
_ = mha(t_qvk, t_qvk, t_qvk, attn_mask=attn_mask, is_causal=True)
        # Passing a causal mask sets is_causal to True
causal_mask = torch.triu(
torch.ones(L, L, device=inputs.device) * float('-inf'), diagonal=1
).to(torch.bool)
mock_layer = MagicMock(torch.nn.MultiheadAttention(E, H), return_value=inputs)
encoder.layers[0] = mock_layer
outputs = encoder(inputs, mask=causal_mask)
mock_layer.assert_called_with(ANY, src_mask=ANY, is_causal=True, src_key_padding_mask=ANY)
# check expected numerical values with all kernels
self.is_causal_kernels(["math"], device)
|
def test_train_with_is_causal(self, device):
# training with is_causal
S, L, E, H = 1, 2, 2, 1
layer = nn.TransformerEncoderLayer(
d_model=2,
dim_feedforward=4,
nhead=H,
batch_first=True,
activation="gelu",
dropout=0,
)
criterion = nn.MSELoss()
encoder = nn.TransformerEncoder(layer, 2).to(device)
optimizer = optim.SGD(encoder.parameters(), lr=0.1, momentum=0.9)
encoder.train()
encoder.train()
optimizer.zero_grad()
inputs = torch.randn(S, L, E).to(device)
mask = torch.nn.Transformer.generate_square_subsequent_mask(
inputs.size(1), device=device
)
outputs = encoder(inputs, mask=mask, is_causal=True)
loss = criterion(outputs[:, 0:2, :], inputs[:, 0:2, :])
loss.backward()
optimizer.step()
# inference with is_causal
t_qvk = torch.randn((S, L, E), device=device, dtype=torch.float32)
mha = nn.MultiheadAttention(E, H).to(device)
mask = torch.nn.Transformer.generate_square_subsequent_mask(
S, device=device
)
attn_out, _ = mha(t_qvk, t_qvk, t_qvk, attn_mask=mask, is_causal=True)
        # Passing is_causal=True alone, without an explicit attn_mask, is rejected
attn_mask = torch.randint(0, 2, size=(L, L), device=device, dtype=torch.bool)
with self.assertRaises(RuntimeError):
_ = mha(t_qvk, t_qvk, t_qvk, is_causal=True)
        # Passing a causal mask sets is_causal to True
causal_mask = torch.triu(
torch.ones(L, L, device=inputs.device) * float('-inf'), diagonal=1
).to(torch.bool)
mock_layer = MagicMock(torch.nn.MultiheadAttention(E, H), return_value=inputs)
encoder.layers[1] = mock_layer
outputs = encoder(inputs, mask=causal_mask)
mock_layer.assert_called_with(ANY, src_mask=ANY, is_causal=True, src_key_padding_mask=ANY)
# check expected numerical values with all kernels
self.is_causal_kernels([SDPBackend.MATH], device)
|
import contextlib
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
from torch.backends.cuda import sdp_kernel, SDPBackend
import torch.optim as optim
from torch.testing._internal.common_dtype import floating_types_and_half
from typing import Tuple
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
TEST_FAIRSEQ,
run_tests,
parametrize,
instantiate_parametrized_tests,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck
)
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import TEST_CUDA, SM80OrLater, PLATFORM_SUPPORTS_FUSED_SDPA
import fairseq.models.transformer as fairseq_transformer
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM86Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (8, 6)
class TestTransformers(NNTestCase):
|
import contextlib
from functools import partial
from collections import namedtuple
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import scaled_dot_product_attention
from torch.nn.attention import sdpa_kernel, SDPBackend
from torch.nn.attention.bias import CausalVariant, causal_lower_right, causal_upper_left
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
import itertools
import torch.optim as optim
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA, onlyCPU
from typing import List, Tuple, Optional, Dict
import torch.utils.cpp_extension
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
IS_FBCODE,
TEST_WITH_ROCM,
skipIfRocm,
skipIfTorchDynamo,
TEST_FAIRSEQ,
run_tests,
parametrize,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck,
make_tensor,
NOTEST_CPU,
IS_WINDOWS,
TEST_WITH_TORCHDYNAMO,
TEST_XPU,
)
from torch._dynamo.testing import CompileCounterWithBackend
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import (
IS_JETSON, SM80OrLater, PLATFORM_SUPPORTS_FLASH_ATTENTION,
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
PLATFORM_SUPPORTS_FUSED_ATTENTION,
PLATFORM_SUPPORTS_CUDNN_ATTENTION,
SM90OrLater,
tf32_on_and_off
)
from test_cpp_extensions_open_device_registration import (
remove_build_path,
generate_faked_module
)
import fairseq.models.transformer as fairseq_transformer
SdpaShape = namedtuple('Sdpa_Shape', ['batch', 'num_heads', 'seq_len', 'head_dim'])
Tolerances = namedtuple('Tolerances', ['atol', 'rtol'])
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM8XDevice = torch.cuda.is_available() and torch.cuda.get_device_capability() in [(8, 6), (8, 7), (8, 9)]
isSM90Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (9, 0)
isSM5xDevice = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] == 5
isLessThanSM80Device = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 8
PLATFORM_SPECIFIC_SDPA = get_platform_specific_sdpa()
MEM_EFF_CAPABILITY_MATCHES_SM80 = SM80OrLater or TEST_WITH_ROCM
class TestTransformers(NNTestCase):
from torch.nn.attention.bias import _calculate_scale
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_transformers.py
|
ones_tensor
|
def ones_tensor(*shape):
return torch.ones(shape, device=device, dtype=torch.float32).to(device)
S, L, E, H = 1, 2, 4, 1
qkv = ones_tensor(S, L, E)
mha = nn.MultiheadAttention(E, H).to(device)
mha.in_proj_weight = Parameter(torch.ones((E * 3, E), device=device))
mha.out_proj.weight = Parameter(torch.ones((E, E), device=device))
expected = torch.ones(size=(S, L, E)).to(device) * 16
for kernel in kernels:
with torch.backends.cuda.sdp_kernel(
enable_math=(kernel == 'math'),
enable_flash=(kernel == 'flash'),
enable_mem_efficient=(kernel == 'meff')
):
actual, _ = mha(qkv, qkv, qkv, need_weights=False, is_causal=True)
self.assertTrue(torch.equal(actual, expected))
if kernel != 'math':
# fails with embedding size not multiple of 4
with self.assertRaisesRegex(RuntimeError, "No available kernel"):
qkv_f, mha_f = ones_tensor(S, L, 2), nn.MultiheadAttention(2, H).to(device)
_ = mha_f(qkv_f, qkv_f, qkv_f, need_weights=False, is_causal=True)
torch.cuda.synchronize()
|
def ones_tensor(*shape):
return torch.ones(shape, dtype=torch.float32)
S, L, E, H = 1, 2, 4, 1
qkv = ones_tensor(S, L, E)
mha = nn.MultiheadAttention(E, H)
mha.in_proj_weight = Parameter(torch.ones((E * 3, E)))
mha.out_proj.weight = Parameter(torch.ones((E, E)))
qkv = qkv.to(float)
kpm = ones_tensor(S, L) * float("-inf")
am = ones_tensor(L, L).to(bool)
|
import contextlib
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
from torch.backends.cuda import sdp_kernel, SDPBackend
import torch.optim as optim
from torch.testing._internal.common_dtype import floating_types_and_half
from typing import Tuple
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
TEST_FAIRSEQ,
run_tests,
parametrize,
instantiate_parametrized_tests,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck
)
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import TEST_CUDA, SM80OrLater, PLATFORM_SUPPORTS_FUSED_SDPA
import fairseq.models.transformer as fairseq_transformer
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM86Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (8, 6)
|
import contextlib
from functools import partial
from collections import namedtuple
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import scaled_dot_product_attention
from torch.nn.attention import sdpa_kernel, SDPBackend
from torch.nn.attention.bias import CausalVariant, causal_lower_right, causal_upper_left
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
import itertools
import torch.optim as optim
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA, onlyCPU
from typing import List, Tuple, Optional, Dict
import torch.utils.cpp_extension
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
IS_FBCODE,
TEST_WITH_ROCM,
skipIfRocm,
skipIfTorchDynamo,
TEST_FAIRSEQ,
run_tests,
parametrize,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck,
make_tensor,
NOTEST_CPU,
IS_WINDOWS,
TEST_WITH_TORCHDYNAMO,
TEST_XPU,
)
from torch._dynamo.testing import CompileCounterWithBackend
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import (
IS_JETSON, SM80OrLater, PLATFORM_SUPPORTS_FLASH_ATTENTION,
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
PLATFORM_SUPPORTS_FUSED_ATTENTION,
PLATFORM_SUPPORTS_CUDNN_ATTENTION,
SM90OrLater,
tf32_on_and_off
)
from test_cpp_extensions_open_device_registration import (
remove_build_path,
generate_faked_module
)
import fairseq.models.transformer as fairseq_transformer
SdpaShape = namedtuple('Sdpa_Shape', ['batch', 'num_heads', 'seq_len', 'head_dim'])
Tolerances = namedtuple('Tolerances', ['atol', 'rtol'])
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM8XDevice = torch.cuda.is_available() and torch.cuda.get_device_capability() in [(8, 6), (8, 7), (8, 9)]
isSM90Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (9, 0)
isSM5xDevice = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] == 5
isLessThanSM80Device = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 8
PLATFORM_SPECIFIC_SDPA = get_platform_specific_sdpa()
MEM_EFF_CAPABILITY_MATCHES_SM80 = SM80OrLater or TEST_WITH_ROCM
from torch.nn.attention.bias import _calculate_scale
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_transformers.py
|
test_is_causal_gpu
|
def test_is_causal_gpu(self):
device = 'cuda'
self.is_causal_kernels(["math", "meff"], device)
|
def test_is_causal_gpu(self):
device = 'cuda'
self.is_causal_kernels([SDPBackend.MATH, SDPBackend.EFFICIENT_ATTENTION], device)
|
import contextlib
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
from torch.backends.cuda import sdp_kernel, SDPBackend
import torch.optim as optim
from torch.testing._internal.common_dtype import floating_types_and_half
from typing import Tuple
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
TEST_FAIRSEQ,
run_tests,
parametrize,
instantiate_parametrized_tests,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck
)
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import TEST_CUDA, SM80OrLater, PLATFORM_SUPPORTS_FUSED_SDPA
import fairseq.models.transformer as fairseq_transformer
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM86Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (8, 6)
class TestTransformers(NNTestCase):
|
import contextlib
from functools import partial
from collections import namedtuple
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import scaled_dot_product_attention
from torch.nn.attention import sdpa_kernel, SDPBackend
from torch.nn.attention.bias import CausalVariant, causal_lower_right, causal_upper_left
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
import itertools
import torch.optim as optim
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA, onlyCPU
from typing import List, Tuple, Optional, Dict
import torch.utils.cpp_extension
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
IS_FBCODE,
TEST_WITH_ROCM,
skipIfRocm,
skipIfTorchDynamo,
TEST_FAIRSEQ,
run_tests,
parametrize,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck,
make_tensor,
NOTEST_CPU,
IS_WINDOWS,
TEST_WITH_TORCHDYNAMO,
TEST_XPU,
)
from torch._dynamo.testing import CompileCounterWithBackend
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import (
IS_JETSON, SM80OrLater, PLATFORM_SUPPORTS_FLASH_ATTENTION,
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
PLATFORM_SUPPORTS_FUSED_ATTENTION,
PLATFORM_SUPPORTS_CUDNN_ATTENTION,
SM90OrLater,
tf32_on_and_off
)
from test_cpp_extensions_open_device_registration import (
remove_build_path,
generate_faked_module
)
import fairseq.models.transformer as fairseq_transformer
SdpaShape = namedtuple('Sdpa_Shape', ['batch', 'num_heads', 'seq_len', 'head_dim'])
Tolerances = namedtuple('Tolerances', ['atol', 'rtol'])
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM8XDevice = torch.cuda.is_available() and torch.cuda.get_device_capability() in [(8, 6), (8, 7), (8, 9)]
isSM90Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (9, 0)
isSM5xDevice = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] == 5
isLessThanSM80Device = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 8
PLATFORM_SPECIFIC_SDPA = get_platform_specific_sdpa()
MEM_EFF_CAPABILITY_MATCHES_SM80 = SM80OrLater or TEST_WITH_ROCM
class TestTransformers(NNTestCase):
from torch.nn.attention.bias import _calculate_scale
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_transformers.py
|
pad_last_dim
|
def pad_last_dim(input_tensor, alignment_size, slice: bool = False):
last_dim_size = input_tensor.size(-1)
if (last_dim_size % alignment_size == 0):
return input_tensor, last_dim_size
pad_count = alignment_size - (last_dim_size % alignment_size)
padded_tensor = F.pad(input_tensor, (0, pad_count))
if slice:
return padded_tensor[..., :last_dim_size], last_dim_size
return padded_tensor, last_dim_size
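# Illustrative sketch (not part of the test suite): how pad_last_dim behaves when the
# last dim is not a multiple of the alignment. The shapes below are hypothetical.
_example = torch.randn(2, 4, 8, 100)                  # last dim 100 is not a multiple of 8
_padded, _og = pad_last_dim(_example, 8)              # zero-pads the last dim up to 104
assert _padded.size(-1) == 104 and _og == 100
_sliced, _og = pad_last_dim(_example, 8, slice=True)  # pads, then slices back to the original size
assert _sliced.size(-1) == 100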
class TestSDPA(NNTestCase):
""" Used to test generic functionality of scaled_dot_product_attention
Summary:
If you are adding a new test to this class, make sure that it runs
for both cpu and cuda. If your test is only applicable to cuda,
add it to TestSDPACudaOnly.
"""
@parametrize("contiguous_inputs", [True, False])
def test_sdp_math_gradcheck(self, device, contiguous_inputs: bool):
batch_size, seq_len, num_heads, head_dim = 4, 4, 2, 16
shape = SdpaShape(batch_size, num_heads, seq_len, head_dim)
make_tensor = partial(rand_sdpa_tensor, type="dense", device=device,
dtype=torch.float64, requires_grad=True, packed=True)
qkv = make_tensor(shape)
query, key, value = qkv.chunk(3, dim=-1)
query = query.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
key = key.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
value = value.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
if contiguous_inputs:
query = query.contiguous()
key = key.contiguous()
value = value.contiguous()
with sdpa_kernel(backends=[SDPBackend.MATH]):
assert gradcheck(lambda *args, **kwargs:
wrapper_set_seed(torch.nn.functional.scaled_dot_product_attention, *args, **kwargs),
(query, key, value, None, 0.0, False)
)
@parametrize("kernel", [SDPBackend.MATH])
def test_scaled_dot_product_attention_math_with_negative_scale(self, device, kernel: SDPBackend):
# https://github.com/pytorch/pytorch/issues/105190.
def ref(x):
v1 = torch.matmul(x, x.transpose(-1, -2))
v2 = v1 / -0.0001
v3 = v2.softmax(dim=-1)
v4 = torch.matmul(v3, x)
return v4
x = torch.randn(1, 3, 64, 64, device=device)
ref_result = ref(x)
with sdpa_kernel(backends=[kernel]):
sdp_math = torch.nn.functional.scaled_dot_product_attention(x, x, x, scale=-1.0 / 0.0001)
self.assertEqual(ref_result, sdp_math)
class TestSDPACpuOnly(NNTestCase):
""" Used to test CPU only functionality of scaled_dot_product_attention """
@parametrize("type", ["dense", "nested"])
@parametrize("dropout", [0.0, 0.7])
@parametrize("dtype", [torch.float64, torch.float32, torch.bfloat16, torch.half])
@skipIfTorchDynamo()
def test_fused_sdp_choice_cpu(self, device, type: str, dropout: float, dtype: torch.dtype):
# Test that cpu and nestedtensor cpu return MATH backend
make_tensor = partial(rand_sdpa_tensor, type=type, device=device, dtype=dtype)
size = SdpaShape(2, 8, 128, 64)
q, k, v = make_tensor(size), make_tensor(size), make_tensor(size)
if type == "nested" \
or dropout > 0.0 \
or dtype not in [torch.float32, torch.float64, torch.bfloat16, torch.float16]:
assert torch._fused_sdp_choice(q, k, v, dropout_p=dropout) == SDPBackend.MATH.value
else:
assert torch._fused_sdp_choice(q, k, v, dropout_p=dropout) == SDPBackend.FLASH_ATTENTION.value
@parametrize("fused_kernel", [SDPBackend.FLASH_ATTENTION])
@parametrize("dtype", [torch.float64, torch.float32, torch.bfloat16, torch.float16])
@parametrize("batch_size", [2, 12])
@parametrize("q_seq_len", [11, 514, 1030])
@parametrize("kv_seq_len", [17, 514])
@parametrize("n_head", [1, 3])
@parametrize("head_dim", [8])
@parametrize("mask_dim", [2, 4])
@parametrize("bool_mask", [False, True])
@parametrize("train", [True, False])
@parametrize("casual", [True, False])
@parametrize("set_attn_mask", [True, False])
def test_scaled_dot_product_fused_attention_mask_vs_math_cpu(
self,
device,
fused_kernel,
dtype,
batch_size,
q_seq_len,
kv_seq_len,
n_head,
head_dim,
mask_dim,
bool_mask,
train,
causal,
set_attn_mask,
):
tol = Tolerances(1e-5, 5e-6)
if dtype is torch.bfloat16:
tol = Tolerances(5e-2, 5e-2)
if dtype is torch.float16:
tol = Tolerances(1e-2, 1e-2)
for mask_shape in itertools.product(
[q_seq_len, 1], [kv_seq_len, 1]
) if mask_dim == 2 else itertools.product(
[batch_size, 1], [n_head, 1], [q_seq_len, 1], [kv_seq_len, 1]
):
make_tensor = partial(rand_sdpa_tensor, type="dense", device=device, dtype=dtype, requires_grad=False)
q_shape = SdpaShape(batch_size, n_head, q_seq_len, head_dim)
kv_shape = SdpaShape(batch_size, n_head, kv_seq_len, head_dim)
q = make_tensor(q_shape)
k = make_tensor(kv_shape)
v = make_tensor(kv_shape)
q2, k2, v2 = q.clone(), k.clone(), v.clone()
if train:
q.requires_grad_(True)
k.requires_grad_(True)
v.requires_grad_(True)
q2.requires_grad_(True)
k2.requires_grad_(True)
v2.requires_grad_(True)
if dtype in [torch.bfloat16, torch.float16]:
q2, k2, v2 = q2.float(), k2.float(), v2.float()
# (B, nh, T, hs)
q = q.view(batch_size, q_seq_len, n_head, head_dim).transpose(1, 2)
k = k.view(batch_size, kv_seq_len, n_head, head_dim).transpose(1, 2)
v = v.view(batch_size, kv_seq_len, n_head, head_dim).transpose(1, 2)
if set_attn_mask and not causal:
if bool_mask:
attn_mask = torch.randint(0, 2, size=mask_shape, dtype=torch.bool, device=device)
else:
attn_mask = torch.randn(mask_shape, dtype=dtype, device=device)
else:
attn_mask = None
q2 = q2.view(batch_size, q_seq_len, n_head, head_dim).transpose(1, 2)
k2 = k2.view(batch_size, kv_seq_len, n_head, head_dim).transpose(1, 2)
v2 = v2.view(batch_size, kv_seq_len, n_head, head_dim).transpose(1, 2)
with sdpa_kernel(backends=[fused_kernel]):
actual = torch.nn.functional.scaled_dot_product_attention(
q, k, v, attn_mask=attn_mask, dropout_p=0.0, is_causal=causal)
with sdpa_kernel(backends=[SDPBackend.MATH]):
if not bool_mask and dtype in [torch.bfloat16, torch.float16] and attn_mask is not None:
attn_mask = attn_mask.float()
math_ref = torch.nn.functional.scaled_dot_product_attention(
q2, k2, v2, attn_mask=attn_mask, dropout_p=0.0, is_causal=causal)
if dtype in [torch.bfloat16, torch.float16]:
math_ref = math_ref.to(dtype)
self.assertFalse(torch.isnan(math_ref).any())
self.assertFalse(torch.isnan(actual).any())
self.assertEqual(actual, math_ref, atol=tol.atol, rtol=tol.rtol)
if train:
actual.sum().backward()
math_ref.sum().backward()
grad_q_actual, grad_k_actual, grad_v_actual = q.grad, k.grad, v.grad
grad_q_ref, grad_k_ref, grad_v_ref = q2.grad, k2.grad, v2.grad
self.assertEqual(grad_q_actual, grad_q_ref, atol=tol.atol, rtol=tol.rtol)
self.assertEqual(grad_k_actual, grad_k_ref, atol=tol.atol, rtol=tol.rtol)
self.assertEqual(grad_v_actual, grad_v_ref, atol=tol.atol, rtol=tol.rtol)
def test_sdpa_with_inf(self, device):
# https://github.com/pytorch/pytorch/issues/127055.
full = torch.full((600, 600), float("-inf"), device=device)
mask = torch.triu(full, diagonal=1) + torch.tril(full, diagonal=-10)
make_tensor = partial(rand_sdpa_tensor, type="dense", device=device, dtype=torch.float32, requires_grad=False)
input_shape = SdpaShape(1, 600, 2, 8)
q = make_tensor(input_shape)
k = make_tensor(input_shape)
v = make_tensor(input_shape)
with sdpa_kernel(backends=[SDPBackend.MATH]):
math_ref = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=mask)
with sdpa_kernel(backends=[SDPBackend.FLASH_ATTENTION]):
actual = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=mask)
self.assertEqual(math_ref, actual)
def test_sdpa_backward_with_gradient(self, device):
# https://github.com/pytorch/pytorch/issues/133671.
def sdpa_helper():
torch.manual_seed(777)
query = (
torch.empty(size=[2, 2, 49, 32], dtype=torch.float32, device=device)
.uniform_(-1, 1)
.requires_grad_(True)
)
key = (
torch.empty(size=[2, 2, 49, 32], dtype=torch.float32, device=device)
.uniform_(-1, 1)
.requires_grad_(True)
)
value = (
torch.empty(size=[2, 2, 49, 32], dtype=torch.float32, device=device)
.uniform_(-1, 1)
.requires_grad_(True)
)
res = torch.nn.functional.scaled_dot_product_attention(
query, key, value, None, 0.0, False
)
res_grad = (
torch.empty_like(res, device=device)
.uniform_(-1, 1)
)
res.backward(res_grad, retain_graph=True)
return res, query.grad, key.grad, value.grad
with sdpa_kernel(backends=[SDPBackend.MATH]):
res_ref, query_grad_ref, key_grad_ref, value_grad_ref = sdpa_helper()
with sdpa_kernel(backends=[SDPBackend.FLASH_ATTENTION]):
res_actual, query_grad_actual, key_grad_actual, value_grad_actual = sdpa_helper()
self.assertEqual(res_ref, res_actual)
self.assertEqual(query_grad_ref, query_grad_actual)
self.assertEqual(key_grad_ref, key_grad_actual)
self.assertEqual(value_grad_ref, value_grad_actual)
@unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_ATTENTION, "Fused SDPA was not built for this system")
@parametrize("backend", [SDPBackend.EFFICIENT_ATTENTION, SDPBackend.FLASH_ATTENTION])
@parametrize("seq_len", [32, 64, 128])
@parametrize("head_dim", [16, 32])
@parametrize("dtype", [torch.float32, torch.float16])
def test_fully_masked_out_rows(self, backend, device, seq_len, head_dim, dtype):
def attention_inputs(seq_len, head_dim, device, dtype, mask_every_n_rows=4):
query = torch.rand(1, 1, seq_len, head_dim, requires_grad=True, device=device, dtype=dtype)
key = torch.rand(1, 1, seq_len, head_dim, requires_grad=True, device=device, dtype=dtype)
value = torch.rand(1, 1, seq_len, head_dim, requires_grad=True, device=device, dtype=dtype)
# Create a mask with deterministic row masking
mask = torch.ones(1, 1, seq_len, seq_len, dtype=torch.bool, device=device)
# Mask every nth row
mask[0, 0, ::mask_every_n_rows, :] = False
# Create a fixed pattern for element-wise masking
element_mask = torch.zeros(seq_len, seq_len, dtype=torch.bool, device=device)
element_mask[torch.arange(seq_len)[:, None] % 5 == torch.arange(seq_len) % 5] = True
# Combine row masking and element-wise masking
mask = mask & element_mask.unsqueeze(0).unsqueeze(0)
return query, key, value, mask
def compute_output_and_grads(query, key, value, mask, backend):
with sdpa_kernel(backend):
masked_out = scaled_dot_product_attention(query, key, value, attn_mask=mask)
loss = masked_out.sum()
grads = torch.autograd.grad(loss, [query, key, value])
return masked_out, grads
if backend == SDPBackend.FLASH_ATTENTION and "cuda" in str(device):
unittest.skip("FlashAttention does not support masks on cuda")
return
if backend == SDPBackend.EFFICIENT_ATTENTION and "cpu" in str(device):
unittest.skip("EfficientAttention does not support masks on cpu")
return
query, key, value, mask = attention_inputs(seq_len, head_dim, device, dtype)
# Compute results for the tested backend
backend_out, backend_grads = compute_output_and_grads(query, key, value, mask, backend)
# Compute results for the Math backend
math_out, math_grads = compute_output_and_grads(query, key, value, mask, SDPBackend.MATH)
# Compare outputs
torch.testing.assert_close(backend_out, math_out, atol=5e-3, rtol=0)
self.assertFalse(backend_out.isnan().any())
self.assertFalse(math_out.isnan().any())
# Compare gradients
for bg, mg in zip(backend_grads, math_grads):
torch.testing.assert_close(bg, mg, atol=3e-3, rtol=0)
self.assertFalse(bg.isnan().any())
self.assertFalse(mg.isnan().any())
# Check if masked rows are zero in output
mask_sum = mask.sum(dim=-1, keepdim=True)
masked_rows = (mask_sum == 0).expand_as(backend_out)
self.assertTrue((mask_sum == 0).sum() > 0, "No fully masked out rows found")
assert torch.all(backend_out[masked_rows] == 0), \
f"Non-zero values in fully masked rows for {backend=}"
# Check if gradients for masked rows are zero
grad_query = backend_grads[0]
assert torch.all(grad_query[masked_rows] == 0), f"Non-zero gradients in fully masked rows for {backend=}"
@parametrize("dtype", [torch.float32, torch.float16])
@parametrize("fill_val", [float("inf")])
def test_non_masked_rows_nan_props(self, device, dtype, fill_val):
query = torch.randn(1, 2, 4, 16, device=device, dtype=dtype)
# a single NaN in the query input
query[0, 1, 2, 3] = fill_val
query = query.detach().requires_grad_(True)
key = torch.randn(1, 2, 4, 16, device=device, dtype=dtype, requires_grad=True)
value = torch.randn(1, 2, 4, 16, device=device, dtype=dtype, requires_grad=True)
out = torch.nn.functional.scaled_dot_product_attention(query, key, value)
self.assertTrue(torch.isnan(out).any())
out.sum().backward()
self.assertTrue(torch.isnan(query.grad).any())
@parametrize("kernel", [SDPBackend.MATH])
def test_scaled_dot_product_attention_math_with_negative_scale(self, device, kernel: SDPBackend):
# https://github.com/pytorch/pytorch/issues/105190.
def ref(x):
v1 = torch.matmul(x, x.transpose(-1, -2))
v2 = v1 / -0.0001
v3 = v2.softmax(dim=-1)
v4 = torch.matmul(v3, x)
return v4
x = torch.randn(1, 3, 64, 64, device=device)
ref_result = ref(x)
with sdpa_kernel(backends=[kernel]):
sdp_math = torch.nn.functional.scaled_dot_product_attention(x, x, x, scale=-1.0 / 0.0001)
self.assertEqual(ref_result, sdp_math)
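# Illustrative sketch (not part of the test suite): the MATH backend used as the
# reference throughout these tests computes plain softmax(q @ k^T * scale) @ v, which a
# hand-rolled version reproduces for small, hypothetical shapes.
_q = torch.randn(1, 2, 4, 8)
_k = torch.randn(1, 2, 4, 8)
_v = torch.randn(1, 2, 4, 8)
with sdpa_kernel(backends=[SDPBackend.MATH]):
    _ref = F.scaled_dot_product_attention(_q, _k, _v)
_manual = torch.softmax(_q @ _k.transpose(-2, -1) / math.sqrt(_q.size(-1)), dim=-1) @ _v
assert torch.allclose(_ref, _manual, atol=1e-5)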
class TestSDPACudaOnly(NNTestCase):
""" Used to test CUDA only functionality of scaled_dot_product_attention
Quirks:
There is some trickiness with this function. Its runtime behavior
is dependent on the CUDA architecture you are testing it on. See
`PLATFORM_SUPPORTS_FUSED_ATTENTION` at the top of the file.
Summary:
Math: always supported
FlashAttention: Supported on sm80 or newer hardware
MemEfficientAttention: Supported on sm50 or newer hardware
"""
_do_cuda_memory_leak_check = True
_do_cuda_non_default_stream = True
# TODO: used for testing the scores (e.g. testing ALiBi); not needed right now
def normalize_flash_attn_S(
self,
attn_unnorm,
q,
k,
v,
query_padding_mask=None,
key_padding_mask=None,
attn_bias=None,
is_dropout=False,
causal=False,
window_size=(-1, -1), # -1 means infinite window size
scale=None,
):
"""
Arguments:
q: (batch_size, seqlen_q, nheads, head_dim)
k, v: (batch_size, seqlen_k, nheads, head_dim)
key_padding_mask: (batch_size, seqlen_q)
attn_bias: broadcastable to (batch_size, nheads, seqlen_q, seqlen_k)
Output:
softmax_lse: (batch_size, nheads, seqlen_q)
softmax_max: (batch_size, nheads, seqlen_q)
"""
q = q.transpose(1, 2)
k = k.transpose(1, 2)
v = v.transpose(1, 2)
if causal:
window_size = (window_size[0], 0)
q, k, v = q.float(), k.float(), v.float()
_, seqlen_q, _, head_dim = q.shape
seqlen_k = k.shape[1]
b = q.shape[0]
from torch.nn.attention.bias import _calculate_scale
scale = _calculate_scale(head_dim, scale)
scores = torch.matmul(q.transpose(1, 2) * scale, k.permute(0, 2, 3, 1))
if key_padding_mask is not None:
scores.masked_fill_(~key_padding_mask.view(b, 1, 1, -1), float("-inf"))
if window_size[0] >= 0 or window_size[1] >= 0:
local_mask = self.construct_local_mask(
seqlen_q,
seqlen_k,
window_size,
query_padding_mask,
key_padding_mask,
q.device,
)
scores.masked_fill_(local_mask, float("-inf"))
if attn_bias is not None:
scores = scores + attn_bias.to(dtype=scores.dtype)
block_size_n = _get_block_size_n(scores.device, head_dim, is_dropout, causal)
scores_block = scores.split(block_size_n, dim=-1)
lse_block = torch.stack([torch.logsumexp(s, dim=-1) for s in scores_block], dim=-1)
lse = torch.logsumexp(lse_block, dim=-1)
# lse could be -inf (i.e. all values in scores are -inf), and we want to set those to inf
# so that when we do torch.exp(m - lse), we get 0.0 instead of NaN.
lse[lse == float("-inf")] = float("inf")
scores_max_block = torch.stack([torch.amax(s, dim=-1) for s in scores_block], dim=-1)
cummax_block = torch.cummax(scores_max_block.flip(-1), dim=-1).values.flip(-1).unbind(dim=-1)
attn_unnorm_block = attn_unnorm.split(block_size_n, dim=-1)
attn_norm = torch.cat(
[
a * (torch.exp(m - lse)).unsqueeze(-1)
for a, m in zip(attn_unnorm_block, cummax_block)
],
dim=-1,
)
if query_padding_mask is not None:
attn_norm.masked_fill_(~query_padding_mask.view(b, 1, -1, 1), 0.0)
# attn_norm.masked_fill_(rearrange(~query_padding_mask, "b s -> b 1 s 1"), 0.0)
return attn_norm.to(dtype=attn_unnorm.dtype)
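# Illustrative sketch (not part of the test suite): the normalization above relies on the
# identity softmax(s) = exp(s - logsumexp(s, dim=-1)), applied block-wise with a running
# max. A single-block version, using a hypothetical score tensor:
_scores = torch.randn(2, 3, 4, 6)                      # (batch, heads, seqlen_q, seqlen_k)
_lse = torch.logsumexp(_scores, dim=-1, keepdim=True)  # log of the softmax denominator
assert torch.allclose(torch.exp(_scores - _lse), _scores.softmax(dim=-1), atol=1e-6)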
def construct_local_mask(self, seqlen_q, seqlen_k, window_size, query_padding_mask, key_padding_mask, device):
# row_idx = rearrange(torch.arange(seqlen_q, device=device, dtype=torch.long), "s -> s 1")
row_idx = torch.arange(seqlen_q, device=device, dtype=torch.long).view(-1, 1)
col_idx = torch.arange(seqlen_k, device=device, dtype=torch.long)
sk = (
seqlen_k
if key_padding_mask is None
else key_padding_mask.sum(-1).view(-1, 1, 1, 1)
# else rearrange(key_padding_mask.sum(-1), "b -> b 1 1 1")
)
sq = (
seqlen_q
if query_padding_mask is None
else query_padding_mask.sum(-1).view(-1, 1, 1, 1)
# else rearrange(query_padding_mask.sum(-1), "b -> b 1 1 1")
)
if window_size[0] < 0:
return col_idx > row_idx + sk - sq + window_size[1]
else:
sk = torch.full_like(col_idx, seqlen_k) if key_padding_mask is None else sk
return torch.logical_or(
col_idx > torch.minimum(row_idx + sk - sq + window_size[1], sk),
col_idx < row_idx + sk - sq - window_size[0],
)
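# Illustrative sketch (not part of the test suite): for a square, unpadded case the causal
# setting (window_size[1] == 0) reduces the mask above to "disallow col > row", i.e. True
# above the diagonal marks positions that get filled with -inf:
_row = torch.arange(4).view(-1, 1)
_col = torch.arange(4)
_disallowed = _col > _row   # row 0 may only attend to col 0, row 1 to cols 0-1, and so on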
def convert_flash_attn_S_to_softmax(
self,
S,
seqlen_q,
seqlen_k,
query_padding_mask,
key_padding_mask,
causal=False,
window_size=(-1, -1), # -1 means infinite window size
):
"""FlashAttention stores the S matrix in a different way.
Arguments:
S: (batch_size, nheads, seqlen_q, seqlen_k)
query_padding_mask: (batch_size, seqlen_q)
key_padding_mask: (batch_size, seqlen_k)
"""
if TEST_WITH_ROCM:
return S
b = S.shape[0]
if causal:
window_size = (window_size[0], 0)
seqlen_q_rounded, seqlen_k_rounded = S.shape[-2:]
S_converted = S
if window_size[0] >= 0 or window_size[1] >= 0:
local_mask = self.construct_local_mask(
seqlen_q,
seqlen_k,
window_size,
query_padding_mask,
key_padding_mask,
S.device,
)
local_mask = F.pad(
local_mask,
(0, seqlen_k_rounded - seqlen_k, 0, seqlen_q_rounded - seqlen_q),
value=True,
)
S_converted = S_converted.masked_fill(local_mask, 0.0)
# Need to zero out things not in attention_mask in case S was initialized with random values
# and some of those values aren't overwritten.
seqlen_q_og = (
query_padding_mask.shape[-1] if query_padding_mask is not None else seqlen_q_rounded
)
if query_padding_mask is not None:
query_padding_mask = F.pad(query_padding_mask, (0, seqlen_q_rounded - seqlen_q_og))
# S_converted = S_converted.masked_fill(rearrange(~query_padding_mask, "b s -> b 1 s 1"), 0.0)
S_converted = S_converted.masked_fill(~query_padding_mask.view(b, 1, -1, 1), 0.0)
seqlen_k_og = key_padding_mask.shape[-1] if key_padding_mask is not None else seqlen_k
if key_padding_mask is not None:
key_padding_mask = F.pad(key_padding_mask, (0, seqlen_k_rounded - seqlen_k_og))
S_converted = S_converted.masked_fill(~key_padding_mask.view(b, 1, 1, -1), 0.0)
# S_converted = S_converted.masked_fill(rearrange(~key_padding_mask, "b s -> b 1 1 s"), 0.0)
S_converted = F.pad(S_converted, (0, 0, 0, seqlen_q_og - seqlen_q_rounded))
S_converted = F.pad(S_converted, (0, seqlen_k_og - seqlen_k_rounded))
return S_converted[:, :, :seqlen_q, :seqlen_k]
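# Illustrative sketch (not part of the test suite): F.pad pads from the last dim backwards,
# so a pad of (0, pk, 0, pq) grows seqlen_k by pk and seqlen_q by pq, which is what the
# rounding/unrounding above relies on. Hypothetical sizes:
_S = torch.zeros(1, 1, 5, 7)
assert F.pad(_S, (0, 1, 0, 3)).shape == (1, 1, 8, 8)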
@skipIfRocm # No cuDNN Attention
@unittest.skipIf(not PLATFORM_SUPPORTS_CUDNN_ATTENTION, "cuDNN Attention is not supported on this system")
def test_cudnn_attention_different_dk_dv(self, device):
dtype = torch.bfloat16
make_tensor = partial(torch.rand, device=device, dtype=dtype, requires_grad=True)
batch, num_heads, head_dim_k, head_dim_v = 32, 16, 128, 64
seq_len = 640
q_shape = SdpaShape(batch, num_heads, seq_len, head_dim_k)
k_shape = SdpaShape(batch, num_heads, seq_len, head_dim_k)
v_shape = SdpaShape(batch, num_heads, seq_len, head_dim_v)
query, key, value = make_tensor(q_shape), make_tensor(k_shape), make_tensor(v_shape)
with sdpa_kernel(backends=[SDPBackend.CUDNN_ATTENTION]):
actual = torch.nn.functional.scaled_dot_product_attention(
query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False)
with sdpa_kernel(backends=[SDPBackend.MATH]):
math_ref = torch.nn.functional.scaled_dot_product_attention(
query.contiguous().to(torch.float32),
key.contiguous().to(torch.float32),
value.contiguous().to(torch.float32),
attn_mask=None, dropout_p=0.0, is_causal=False)
self.assertEqual(actual.contiguous(), math_ref.contiguous().to(dtype), atol=1e-3, rtol=1e-2)
@skipIfRocm # No cuDNN Attention
@unittest.skipIf(not PLATFORM_SUPPORTS_CUDNN_ATTENTION, "cuDNN Attention is not supported on this system")
def test_cudnn_attention_fail_d128(self, device):
# Test that cuDNN attention dispatching correctly bails out on d > 128
b, h = 1, 2
s_q, s_kv = 128, 128
d_qk, d_v = 128, 144
q = torch.randn(b, h, s_q, d_qk, device=device, dtype=torch.bfloat16)
k = torch.randn(b, h, s_kv, d_qk, device=device, dtype=torch.bfloat16)
v = torch.randn(b, h, s_kv, d_v, device=device, dtype=torch.bfloat16)
with sdpa_kernel(backends=[SDPBackend.CUDNN_ATTENTION]):
with self.assertRaisesRegex(RuntimeError, "No available kernel."):
o = torch.nn.functional.scaled_dot_product_attention(q, k, v)
@skipIfRocm # No cuDNN Attention
@unittest.skipIf(not PLATFORM_SUPPORTS_CUDNN_ATTENTION, "cudnn Attention is not supported on this system")
def test_cudnn_attention_trivial_output_transpose(self, device):
# see also: https://github.com/pytorch/pytorch/issues/134001
x = torch.randn(2, 4, 1, 64, device='cuda', dtype=torch.float16, requires_grad=True)
x2 = x.transpose(1, 2)
with torch.nn.attention.sdpa_kernel(torch.nn.attention.SDPBackend.CUDNN_ATTENTION):
o = torch.nn.functional.scaled_dot_product_attention(x2, x2, x2).transpose(1, 2).reshape(2, 64, 4)
o.backward(o)
x_cpu = x.clone().cpu().detach()
x_cpu.requires_grad = True
x2_cpu = x_cpu.transpose(1, 2)
o = torch.nn.functional.scaled_dot_product_attention(x2_cpu, x2_cpu, x2_cpu).transpose(1, 2).reshape(2, 64, 4)
o.backward(o)
torch.testing.assert_close(x.grad, x_cpu.grad.cuda(), atol=7e-3, rtol=7e-3)
@unittest.skipIf(not PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, "Fused SDPA was not built for this system")
@parametrize("mask_dim", [1, 2, 3, 4])
def test_mem_efficient_attention_mask_variants(self, device, mask_dim: List[int]):
dtype = torch.float16
make_tensor = partial(torch.rand, device=device, dtype=dtype, requires_grad=True)
batch, num_heads, head_dim = 8, 8, 64
seq_len_q, seq_len_kv = 64, 15
query = make_tensor(SdpaShape(batch, num_heads, seq_len_q, head_dim))
kv_shape = SdpaShape(batch, num_heads, seq_len_kv, head_dim)
key, value = make_tensor(kv_shape), make_tensor(kv_shape)
if mask_dim == 1:
mask = torch.randn((seq_len_kv,), device=device, dtype=dtype)
elif mask_dim == 2:
mask = torch.randn((seq_len_q, seq_len_kv), device=device, dtype=dtype)
elif mask_dim == 3:
mask = torch.randn((num_heads, seq_len_q, seq_len_kv), device=device, dtype=dtype)
elif mask_dim == 4:
mask = torch.randn((batch, num_heads, seq_len_q, seq_len_kv), device=device, dtype=dtype)
with sdpa_kernel(backends=[SDPBackend.EFFICIENT_ATTENTION]):
out = F.scaled_dot_product_attention(query, key, value, mask)
out.sum().backward()
@unittest.skipIf(not PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, "Fused SDPA was not built for this system")
@parametrize("dtype", [torch.float, torch.float16])
def test_mem_eff_attention_non_contiguous_mask(self, device, dtype):
make_tensor = partial(torch.rand, device=device, dtype=dtype, requires_grad=True)
batch, num_heads, head_dim = 8, 8, 64
seq_len_q, seq_len_kv = 64, 16
query = make_tensor(SdpaShape(batch, num_heads, seq_len_q, head_dim))
kv_shape = SdpaShape(batch, num_heads, seq_len_kv, head_dim)
key, value = make_tensor(kv_shape), make_tensor(kv_shape)
mask = torch.randn((batch, num_heads, seq_len_q, seq_len_kv), device=device, dtype=dtype)
mask = torch.as_strided(mask, (batch, num_heads, seq_len_q, seq_len_kv), (0, 0, 0, 1))
with sdpa_kernel(backends=[SDPBackend.EFFICIENT_ATTENTION]):
out = F.scaled_dot_product_attention(query, key, value, mask)
out.sum().backward()
@unittest.skipIf(not PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, "Fused SDPA was not built for this system")
@parametrize("dtype", [torch.float, torch.float16])
def test_mem_eff_attention_long_sequence_mask(self, device, dtype):
if torch.cuda.get_device_properties('cuda').total_memory < 80 * 2**30:
unittest.skip("This test requires substatnial GPU memory.")
return
make_tensor = partial(torch.rand, device=device, dtype=dtype, requires_grad=True)
batch, num_heads, head_dim = 1, 32, 64
seq_len_q, seq_len_kv = 8192, 8192
query = make_tensor(SdpaShape(batch, num_heads, seq_len_q, head_dim))
kv_shape = SdpaShape(batch, num_heads, seq_len_kv, head_dim)
key, value = make_tensor(kv_shape), make_tensor(kv_shape)
mask = torch.randn((batch, num_heads, seq_len_q, seq_len_kv), device=device, dtype=dtype)
with sdpa_kernel(backends=[SDPBackend.EFFICIENT_ATTENTION]):
out = F.scaled_dot_product_attention(query, key, value, mask)
out.sum().backward()
@unittest.skipIf(not PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, "Fused SDPA was not built for this system")
def test_mem_eff_attention_non_contig_mask_bug(self, device):
# Without the fix this produces `AssertionError: assert 0.07352933287620544 < 1e-07`
# Shapes taken from repro
query_size = (3, 16, 1, 128)
query_strides = (2304, 128, 2048, 1)
key_size = (3, 16, 14, 128)
key_strides = (3584, 0, 256, 1)
value_size = (3, 16, 14, 128)
value_strides = (3584, 0, 256, 1)
attention_mask_size = (3, 1, 1, 14)
attn_mask_strides = (14, 14, 14, 1)
# Calculate the number of elements needed for each tensor
query_num_elements = max(size * stride for size, stride in zip(query_size, query_strides))
key_num_elements = max(size * stride for size, stride in zip(key_size, key_strides))
value_num_elements = max(size * stride for size, stride in zip(value_size, value_strides))
attention_mask_num_elements = max(size * stride for size, stride in zip(attention_mask_size, attn_mask_strides))
# Create the tensors with the specified sizes and strides
query = torch.randn(query_num_elements, device=device).as_strided(query_size, query_strides)
key = torch.randn(key_num_elements, device=device).as_strided(key_size, key_strides)
value = torch.randn(value_num_elements, device=device).as_strided(value_size, value_strides)
bias = torch.randn(attention_mask_num_elements, device=device).as_strided(attention_mask_size, attn_mask_strides)
with sdpa_kernel(backends=[SDPBackend.EFFICIENT_ATTENTION]):
out = F.scaled_dot_product_attention(query, key, value, bias)
out_contig = F.scaled_dot_product_attention(query, key, value, bias.contiguous())
max_diff = (out - out_contig).abs().mean()
self.assertTrue(max_diff.item() < 1e-7)
@unittest.skipIf(not PLATFORM_SUPPORTS_FLASH_ATTENTION, "Fused SDPA was not built for this system")
def test_singelton_head_dim_stride_ne_1(self, device):
query = torch.tensor([[[[1, 2]]]], dtype=torch.float16, device=device)
query = query.transpose(-1, -2)
key = torch.tensor([[[[1]]]], dtype=torch.float16, device=device)
value = torch.tensor([[[[1]]]], dtype=torch.float16, device=device)
with torch.backends.cuda.sdp_kernel(enable_math=False, enable_flash=True, enable_mem_efficient=False):
scaled_dot_product_attention(query, key, value)
@unittest.skipIf(not PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, "Fused SDPA was not built for this system")
@parametrize("type", ["dense", "nested"])
@parametrize("is_contiguous", [True, False])
def test_scaled_dot_product_attention_fused_kernels_packed(self, device, type: str, is_contiguous: bool):
if TEST_WITH_ROCM and type == 'nested':
self.skipTest("ROCM does not support efficient attention on nested tensors, for now")
make_tensor = partial(rand_sdpa_tensor, type=type, device=device, dtype=torch.float16, packed=True)
batch_size, seq_len, num_heads, head_dim = 32, 64, 16, 64
shape = SdpaShape(batch_size, num_heads, seq_len, head_dim)
# Test Packed
qkv = make_tensor(shape)
query, key, value = qkv.chunk(3, dim=-1)
query = query.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
value = value.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
key = key.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
if is_contiguous:
query = query.contiguous()
key = key.contiguous()
value = value.contiguous()
with sdpa_kernel(backends=[SDPBackend.EFFICIENT_ATTENTION]):
actual = torch.nn.functional.scaled_dot_product_attention(
query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False)
with sdpa_kernel(backends=[SDPBackend.MATH]):
math_ref = torch.nn.functional.scaled_dot_product_attention(
query.contiguous(), key.contiguous(), value.contiguous(),
attn_mask=None, dropout_p=0.0, is_causal=False)
self.assertEqual(actual.contiguous(), math_ref.contiguous(), atol=2e-3, rtol=1e-2)
@skipIfRocm # Missing nested and EFFICIENT_ATTENTION
@unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_ATTENTION, "Fused SDPA was not built for this system")
@parametrize("type", ["dense", "nested"])
@parametrize("fused_kernel", [SDPBackend.FLASH_ATTENTION, SDPBackend.EFFICIENT_ATTENTION] if
PLATFORM_SUPPORTS_FLASH_ATTENTION else [SDPBackend.EFFICIENT_ATTENTION])
def test_scaled_dot_product_attention_fused_kernels_packed_accuracy(self, device, type: str, fused_kernel: str):
def rand_nt(shape):
batch, seq_len, num_heads, head_dim = shape
tensors = [6 * torch.rand((seq_len, 3 * num_heads * head_dim), device=device, dtype=torch.float32) - 3
for _ in range(batch)]
return (torch.nested.nested_tensor(tensors, device=device, dtype=torch.float32),
torch.nested.nested_tensor(tensors, device=device, dtype=torch.float16))
def rand_tensor(shape):
batch, seq_len, num_heads, head_dim = shape
tensor = 6 * torch.rand((batch, seq_len, 3 * num_heads * head_dim), device=device, dtype=torch.float32) - 3
return tensor, tensor.to(dtype=torch.float16)
batch_size, seq_len, num_heads, head_dim = 16, 8, 4, 64
shape = (batch_size, seq_len, num_heads, head_dim)
# Test Packed
qkv, qkv_low_precision = rand_tensor(shape) if type == "dense" else rand_nt(shape)
query, key, value = qkv.chunk(3, dim=-1)
query_lp, key_lp, value_lp = qkv_low_precision.chunk(3, dim=-1)
query = query.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
key = key.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
value = value.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
query_lp = query_lp.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
key_lp = key_lp.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
value_lp = value_lp.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
with sdpa_kernel(backends=[fused_kernel]):
actual = torch.nn.functional.scaled_dot_product_attention(
query_lp, key_lp, value_lp, attn_mask=None, dropout_p=0.0, is_causal=False)
with sdpa_kernel(backends=[SDPBackend.MATH]):
math_ref_lp = torch.nn.functional.scaled_dot_product_attention(
query_lp.contiguous(), key_lp.contiguous(), value_lp.contiguous(),
attn_mask=None, dropout_p=0.0, is_causal=False)
math_query = query.contiguous()
math_key = key.contiguous()
math_value = value.contiguous()
math_ref = torch.nn.functional.scaled_dot_product_attention(
math_query, math_key, math_value, attn_mask=None, dropout_p=0.0, is_causal=False)
actual_test = actual
math_ref_test = math_ref
math_ref_lp_test = math_ref_lp
if actual_test.is_nested:
actual_test = torch.nested.to_padded_tensor(actual_test.contiguous(), padding=0.0)
math_ref_test = torch.nested.to_padded_tensor(math_ref_test, padding=0.0)
math_ref_lp_test = torch.nested.to_padded_tensor(math_ref_lp_test, padding=0.0)
actual_test = actual_test.to(dtype=torch.float32).contiguous()
math_ref_test = math_ref_test.to(dtype=torch.float32).contiguous()
math_ref_lp_test = math_ref_lp_test.to(dtype=torch.float32).contiguous()
self.assertEqual(math_ref_test, math_ref_lp_test, atol=7e-3, rtol=7e-3)
self.assertEqual(actual_test, math_ref_test, atol=7e-3, rtol=7e-3)
@unittest.skipIf(not PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, "Efficient Attention was not built for this system")
@parametrize("contiguous_inputs", [True, False])
@parametrize("is_causal", [True, False])
def test_sdp_mem_efficient_grad_against_math(self, device, contiguous_inputs: bool, is_causal: bool):
batch_size, seq_len, num_heads, head_dim = 4, 4, 2, 16
make_tensor = partial(rand_sdpa_tensor, type="dense", device=device,
dtype=torch.float64, requires_grad=True, packed=True)
qkv = make_tensor(SdpaShape(batch_size, num_heads, seq_len, head_dim))
qkv_lp = qkv.detach().clone().to(torch.float32).requires_grad_()
query, key, value = qkv.chunk(3, dim=-1)
query_lp, key_lp, value_lp = qkv_lp.chunk(3, dim=-1)
query = query.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
key = key.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
value = value.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
query_lp = query_lp.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
key_lp = key_lp.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
value_lp = value_lp.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
if contiguous_inputs:
query = query.contiguous()
key = key.contiguous()
value = value.contiguous()
query_lp = query_lp.contiguous()
key_lp = key_lp.contiguous()
value_lp = value_lp.contiguous()
with sdpa_kernel(backends=[SDPBackend.MATH]):
out = torch.nn.functional.scaled_dot_product_attention(query, key, value, None, 0.0, is_causal)
with sdpa_kernel(backends=[SDPBackend.EFFICIENT_ATTENTION]):
out_lp = torch.nn.functional.scaled_dot_product_attention(
query_lp, key_lp, value_lp, None, 0.0, is_causal)
rand_upward = torch.rand_like(out)
rand_upward_lp = rand_upward.to(torch.float32)
out.backward(rand_upward)
out_lp.backward(rand_upward_lp)
# Cast up and compare
self.assertEqual(qkv.grad, qkv_lp.grad.to(torch.float64), atol=1e-5, rtol=1e-5)
@unittest.skipIf(not PLATFORM_SUPPORTS_FLASH_ATTENTION, "Flash Attention was not built for this system")
@parametrize("contiguous_inputs", [True, False])
@parametrize("is_causal", [True, False])
@parametrize("dtype", [torch.float16, torch.bfloat16])
def test_sdp_flash_attention_grad_against_math(self, device, contiguous_inputs: bool, is_causal: bool, dtype: torch.dtype):
batch_size, seq_len, num_heads, head_dim = 4, 4, 2, 16
make_tensor = partial(rand_sdpa_tensor, type="dense", device=device,
dtype=torch.float64, requires_grad=True, packed=True)
qkv = make_tensor(SdpaShape(batch_size, num_heads, seq_len, head_dim))
qkv_lp = qkv.detach().clone().to(dtype).requires_grad_()
query, key, value = qkv.chunk(3, dim=-1)
query_lp, key_lp, value_lp = qkv_lp.chunk(3, dim=-1)
query = query.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
key = key.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
value = value.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
query_lp = query_lp.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
key_lp = key_lp.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
value_lp = value_lp.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
if contiguous_inputs:
query = query.contiguous()
key = key.contiguous()
value = value.contiguous()
query_lp = query_lp.contiguous()
key_lp = key_lp.contiguous()
value_lp = value_lp.contiguous()
with sdpa_kernel(backends=[SDPBackend.MATH]):
out = torch.nn.functional.scaled_dot_product_attention(query, key, value, None, 0.0, is_causal)
with sdpa_kernel(backends=[SDPBackend.FLASH_ATTENTION]):
out_lp = torch.nn.functional.scaled_dot_product_attention(
query_lp, key_lp, value_lp, None, 0.0, is_causal)
rand_upward = torch.rand_like(out)
rand_upward_lp = rand_upward.to(dtype)
out.backward(rand_upward)
out_lp.backward(rand_upward_lp)
# Cast up and compare
# Since we are doing the compute in reduced precision we have to bump the tolerance
# (and loosen it further for bfloat16)
atol = 7e-4 if dtype == torch.float16 else 7e-3
rtol = 7e-4 if dtype == torch.float16 else 7e-3
if TEST_WITH_ROCM:
atol = 9e-4 if dtype == torch.float16 else 9e-3
self.assertEqual(qkv.grad, qkv_lp.grad.to(torch.float64), atol=atol, rtol=rtol)
@skipIfRocm # Missing nested and EFFICIENT_ATTENTION
@unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_ATTENTION, "Platform does not support fused SDPA")
@parametrize("type", ["dense", "nested"])
def test_fused_sdp_choice(self, device, type: str):
batch_size, seq_len, num_heads, head_dim = 2, 128, 8, 64
shape = SdpaShape(batch_size, num_heads, seq_len, head_dim)
make_tensor = partial(rand_sdpa_tensor, device=device, dtype=torch.float16, packed=True, requires_grad=True)
qkv = make_tensor(shape, type=type)
query, key, value = qkv.chunk(3, dim=-1)
query = query.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
value = value.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
key = key.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
if type != "nested" and PLATFORM_SUPPORTS_CUDNN_ATTENTION and SM90OrLater:
self.assertEqual(torch._fused_sdp_choice(query, key, value), SDPBackend.CUDNN_ATTENTION.value)
elif PLATFORM_SUPPORTS_FLASH_ATTENTION:
self.assertEqual(torch._fused_sdp_choice(query, key, value), SDPBackend.FLASH_ATTENTION.value)
elif type != "nested" and PLATFORM_SUPPORTS_CUDNN_ATTENTION: # e.g., we're on Windows
self.assertEqual(torch._fused_sdp_choice(query, key, value), SDPBackend.CUDNN_ATTENTION.value)
else:
self.assertEqual(torch._fused_sdp_choice(query, key, value), SDPBackend.EFFICIENT_ATTENTION.value)
# Change dtype to float32 so that efficient attention should get chosen
make_tensor = partial(rand_sdpa_tensor, device=device, dtype=torch.float32, packed=True)
qkv = make_tensor(shape, type=type)
query, key, value = qkv.chunk(3, dim=-1)
query = query.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
value = value.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
key = key.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
assert torch._fused_sdp_choice(query, key, value) == SDPBackend.EFFICIENT_ATTENTION.value
@skipIfRocm # Missing triton.float32 ("triton" prefix is to locate skipped UTs), and deterministic algo
@unittest.skipIf(not PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, "Platform does not support fused SDPA")
@parametrize("warn_only", [True, False])
def test_sdp_choice_with_determinism(self, device, warn_only):
batch_size, seq_len, num_heads, head_dim = 1, 64, 8, 64
shape = SdpaShape(batch_size, num_heads, seq_len, head_dim)
make_tensor = partial(rand_sdpa_tensor, type="dense", device=device, dtype=torch.float32, packed=False)
query, key, value = make_tensor(shape), make_tensor(shape), make_tensor(shape)
with use_deterministic_algorithims(True, warn_only=warn_only):
with sdpa_kernel(backends=[SDPBackend.EFFICIENT_ATTENTION, SDPBackend.MATH]):
assert torch._fused_sdp_choice(query, key, value) == SDPBackend.EFFICIENT_ATTENTION.value
@skipIfRocm # Missing deterministic algo
@unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_ATTENTION, "Fused SDPA was not built for this system")
@parametrize("fused_kernel", PLATFORM_SPECIFIC_SDPA)
@parametrize("warn_only", [True, False])
def test_fused_backwards_throws_determinism_warning(self, device, warn_only, fused_kernel):
batch_size, seq_len, num_heads, head_dim = 1, 64, 8, 64
shape = SdpaShape(batch_size, num_heads, seq_len, head_dim)
make_tensor = partial(rand_sdpa_tensor, type="dense", device=device, dtype=torch.float16, packed=False, requires_grad=True)
query, key, value = make_tensor(shape), make_tensor(shape), make_tensor(shape)
kernel_name = "Memory Efficient attention" if fused_kernel == SDPBackend.EFFICIENT_ATTENTION else \
"Flash Attention" if fused_kernel == SDPBackend.FLASH_ATTENTION else "cuDNN Attention"
warning_context = (
self.assertWarnsRegex(
UserWarning,
f"{kernel_name} defaults to a non-deterministic algorithm.",
)
if warn_only
else contextlib.nullcontext()
)
with use_deterministic_algorithims(True, warn_only=warn_only):
with sdpa_kernel(backends=[fused_kernel]):
with warning_context:
if warn_only or fused_kernel != SDPBackend.CUDNN_ATTENTION:
torch.nn.functional.scaled_dot_product_attention(query, key, value).sum().backward()
else:
# cuDNN attention has no deterministic fallback
self.assertRaises(RuntimeError, lambda:
torch.nn.functional.scaled_dot_product_attention(query, key, value).sum().backward())
@unittest.skip("This test is not behaving deterministaclly non-deterministaclly on CI/CD")
@unittest.skipIf(not PLATFORM_SUPPORTS_FLASH_ATTENTION, "Platform does not support fused SDPA")
def test_mem_eff_backwards_determinism(self, device):
# Need big seq_len to ensure that num_splits > 1
dtype = torch.float32
batch_size, seq_len, n_heads, head_dim = 1, 1024, 8, 64
query = torch.rand(batch_size, n_heads, seq_len, head_dim,
device=device, dtype=dtype, requires_grad=True)
key = torch.rand(batch_size, n_heads, seq_len, head_dim, device=device,
dtype=dtype, requires_grad=True)
value = torch.rand(batch_size, n_heads, seq_len, head_dim,
device=device, dtype=dtype, requires_grad=True)
with sdpa_kernel(backends=[SDPBackend.EFFICIENT_ATTENTION]):
# Run once to establish baseline
out = F.scaled_dot_product_attention(query, key, value)
upward_grad = torch.rand_like(out)
out.backward(upward_grad)
initial_query_grad = query.grad
# Re-run the op with the same upward grad and check that the backward is
# not deterministic
diff_answer_once = False
for _ in range(100):
query.grad = None
out = F.scaled_dot_product_attention(query, key, value)
out.backward(upward_grad)
if not torch.equal(initial_query_grad, query.grad):
diff_answer_once = True
break
self.assertTrue(diff_answer_once)
with use_deterministic_algorithims(True, warn_only=False):
query.grad = None
out = F.scaled_dot_product_attention(query, key, value)
upward_grad = torch.rand_like(out)
out.backward(upward_grad)
initial_query_grad = query.grad
# Re-run the op with the same upward grad and check that the backward is
# deterministic now that we have enforced it
diff_answer_once = False
for _ in range(100):
query.grad = None
out = F.scaled_dot_product_attention(query, key, value)
out.backward(upward_grad)
if not torch.equal(initial_query_grad, query.grad):
diff_answer_once = True
break
self.assertFalse(diff_answer_once)
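# Illustrative sketch (not part of the test suite): use_deterministic_algorithims is a test
# helper defined elsewhere in this file; the switch it presumably wraps is the public torch
# API shown below (warn_only makes violations warn instead of raise).
torch.use_deterministic_algorithms(True, warn_only=True)
# ... run ops that may lack deterministic implementations ...
torch.use_deterministic_algorithms(False)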
# verified passing successfully on H100
@unittest.skipIf(not PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, "Does not support SDPA")
@unittest.skipIf(IS_JETSON, "causing sigkill on Jetson")
@parametrize("batch_size", [1, 8])
@parametrize("seq_len_q", [8, 103, 1024, 2048] if MEM_EFF_CAPABILITY_MATCHES_SM80
else [4, 8, 256, 512])
@parametrize("seq_len_k", [8, 103, 1024, 2048] if MEM_EFF_CAPABILITY_MATCHES_SM80
else [4, 8, 256, 512])
@parametrize("head_dim", [8, 16, 96, 128] if MEM_EFF_CAPABILITY_MATCHES_SM80
else [8, 16, 32, 64])
@parametrize("is_causal", [False, True])
@parametrize("dropout_p", [0.0, 0.22])
@parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32] if MEM_EFF_CAPABILITY_MATCHES_SM80
else [torch.float16, torch.float32])
@parametrize("scale", [None, "l1"])
def test_mem_efficient_attention_vs_math_ref_grads(self, device, batch_size: int, seq_len_q: int, seq_len_k: int,
head_dim: int, is_causal: bool, dropout_p: float, dtype: torch.dtype,
scale: str):
def _get_mem_eff_drop_mask(batch_size, n_heads, q_len, kv_len, p, seed, offset, device=device):
mask = torch.empty((batch_size, n_heads, q_len, kv_len), device=device, dtype=torch.float32)
rand_uniform = torch._fill_mem_eff_dropout_mask_(mask, p, seed, offset)
mask = (rand_uniform > p).to(torch.float32)
return mask
if max(seq_len_q, seq_len_k) >= 2048 and torch.cuda.get_device_properties('cuda').total_memory < 40 * 2**30:
unittest.skip("Reference implementation OOM")
return
if TEST_WITH_ROCM and seq_len_q * seq_len_k * head_dim * batch_size > 1024 * 1024 * 128:
torch.cuda.empty_cache() # Prevent memory fragmentation
if TEST_WITH_ROCM and is_causal and seq_len_q != seq_len_k:
self.skipTest("ROCm does not accept is_casual when seq_len_q != seq_len_k")
seed = 42
scale = scale if scale is None else (1 / head_dim)
n_heads = 4
query = torch.rand(batch_size, n_heads, seq_len_q, head_dim,
device=device, dtype=dtype, requires_grad=True)
key = torch.rand(batch_size, n_heads, seq_len_k, head_dim, device=device,
dtype=dtype, requires_grad=True)
value = torch.rand(batch_size, n_heads, seq_len_k, head_dim,
device=device, dtype=dtype, requires_grad=True)
higher_precision_dtype = torch.float64
query_ref, key_ref, value_ref = query_key_value_clones(query, key, value, dtype=higher_precision_dtype)
# Create real output
with sdpa_kernel(backends=[SDPBackend.EFFICIENT_ATTENTION]):
# Set the seed and run the kernel
torch.manual_seed(seed)
out = F.scaled_dot_product_attention(query, key, value, dropout_p=dropout_p, is_causal=is_causal, scale=scale)
if dropout_p == 0.0:
with sdpa_kernel(backends=[SDPBackend.MATH]):
# High Precision Math Reference
out_ref = F.scaled_dot_product_attention(query_ref, key_ref, value_ref,
dropout_p=dropout_p, is_causal=is_causal, scale=scale)
# Low Precision Math Reference
out_lp_ref = F.scaled_dot_product_attention(query, key, value,
dropout_p=dropout_p, is_causal=is_causal, scale=scale)
else:
if seq_len_q > 1024:
self.skipTest("Will call _fill_mem_eff_dropout_mask with too many threads!")
# Create the dropout_mask
torch.manual_seed(seed)
dropout_mask = _get_mem_eff_drop_mask(batch_size, n_heads, seq_len_q, seq_len_k, dropout_p, seed, 0, device=device)
# High Precision Math Reference
out_ref = torch.ops.aten._scaled_dot_product_attention_math(
query_ref, key_ref, value_ref, dropout_p=dropout_p, is_causal=is_causal, scale=scale, dropout_mask=dropout_mask)[0]
# Low Precision Math Reference
out_lp_ref = torch.ops.aten._scaled_dot_product_attention_math(
query, key, value, dropout_p=dropout_p, is_causal=is_causal, scale=scale,
dropout_mask=dropout_mask)[0]
upstream_grad = torch.rand_like(out, requires_grad=False)
grads = torch.autograd.grad(out, (query, key, value), upstream_grad)
grads_ref_lp = torch.autograd.grad(out_lp_ref, (query, key, value), upstream_grad)
grads_ref = torch.autograd.grad(out_ref, (query_ref, key_ref, value_ref), upstream_grad)
fudge_factors = {
'out': 3.0,
'grad_query': 150.0,
'grad_key': 25.0,
'grad_value': 8.5,
}
if TEST_WITH_ROCM:
fudge_factors['grad_key'] = 45.0
fudge_factors['grad_query'] = 360.0
if seq_len_k >= 1024:
fudge_factors['grad_key'] = 70.0
if seq_len_k >= 2048:
fudge_factors['grad_key'] = 160.0
fudge_factors['grad_query'] = 650.0
if dtype == torch.float32:
fudge_factors['grad_key'] = 90.0
check_out_and_grad(
(out_ref, out_lp_ref, out),
*zip(grads_ref, grads_ref_lp, grads),
fudge_factors=fudge_factors,
)
@unittest.skipIf(not PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, "Does not support SDPA")
@unittest.skipIf(IS_JETSON, "causing sigkill on Jetson")
@parametrize("batch_size", [1, 8])
@parametrize("seq_len_q", [8, 312, 1024, 2048] if MEM_EFF_CAPABILITY_MATCHES_SM80
else [8, 152, 512])
@parametrize("seq_len_k", [8, 408, 1024, 2048] if MEM_EFF_CAPABILITY_MATCHES_SM80
else [8, 37, 512])
@parametrize("head_dim", [8, 16, 96, 128] if MEM_EFF_CAPABILITY_MATCHES_SM80
else [8, 16, 32, 64])
@parametrize("is_causal", [False])
@parametrize("dropout_p", [0.0, 0.22])
@parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32] if MEM_EFF_CAPABILITY_MATCHES_SM80
else [torch.float16, torch.float32])
@parametrize("scale", [None, "l1"])
def test_mem_efficient_attention_attn_mask_vs_math_ref_grads(self, device, batch_size: int, seq_len_q: int,
seq_len_k: int, head_dim: int, is_causal: bool,
dropout_p: float, dtype: torch.dtype,
scale: str):
def _get_mem_eff_drop_mask(batch_size, n_heads, q_len, kv_len, p, seed, offset, device=device):
mask = torch.empty((batch_size, n_heads, q_len, kv_len), device=device, dtype=torch.float32)
rand_uniform = torch._fill_mem_eff_dropout_mask_(mask, p, seed, offset)
mask = (rand_uniform > p).to(torch.float32)
return mask
if max(seq_len_q, seq_len_k) >= 2048 and torch.cuda.get_device_properties('cuda').total_memory < 40 * 2**30:
unittest.skip("Reference implementation OOM")
return
if TEST_WITH_ROCM and dtype == torch.float32:
unittest.skip("Skip fp32 attn_mask gradients on ROCM, for now.")
return
if TEST_WITH_ROCM and seq_len_q * seq_len_k * head_dim * batch_size > 1024 * 1024 * 128:
torch.cuda.empty_cache() # Prevent memory fragmentation
seed = 42
scale = scale if scale is None else (1 / head_dim)
n_heads = 4
query = torch.rand(batch_size, n_heads, seq_len_q, head_dim,
device=device, dtype=dtype, requires_grad=True)
key = torch.rand(batch_size, n_heads, seq_len_k, head_dim, device=device,
dtype=dtype, requires_grad=True)
value = torch.rand(batch_size, n_heads, seq_len_k, head_dim,
device=device, dtype=dtype, requires_grad=True)
attn_mask = torch.rand(seq_len_q, seq_len_k, device=device, dtype=dtype, requires_grad=True)
higher_precision_dtype = torch.float64 if dtype == torch.float32 else torch.float32
query_ref, key_ref, value_ref = query_key_value_clones(query, key, value, dtype=higher_precision_dtype)
attn_mask_ref = attn_mask.detach().to(higher_precision_dtype).requires_grad_(True)
# Create real output
with sdpa_kernel(backends=[SDPBackend.EFFICIENT_ATTENTION]):
# Set the seed and run the kernel
torch.manual_seed(seed)
out = F.scaled_dot_product_attention(query, key, value, attn_mask, dropout_p=dropout_p,
is_causal=is_causal, scale=scale)
if dropout_p == 0.0:
with sdpa_kernel(backends=[SDPBackend.MATH]):
# High Precision Math Reference
out_ref = F.scaled_dot_product_attention(query_ref, key_ref, value_ref, attn_mask_ref,
dropout_p=dropout_p, is_causal=is_causal, scale=scale)
# Low Precision Math Reference
out_lp_ref = F.scaled_dot_product_attention(query, key, value, attn_mask,
dropout_p=dropout_p, is_causal=is_causal, scale=scale)
else:
if seq_len_q > 1024:
self.skipTest("Will call _fill_mem_eff_dropout_mask with too many threads!")
# Create the dropout_mask
torch.manual_seed(seed)
dropout_mask = _get_mem_eff_drop_mask(batch_size, n_heads, seq_len_q,
seq_len_k, dropout_p, seed, 0, device=device)
# High Precision Math Reference
out_ref = torch.ops.aten._scaled_dot_product_attention_math(
query_ref, key_ref, value_ref, attn_mask_ref, dropout_p=dropout_p, is_causal=is_causal,
scale=scale, dropout_mask=dropout_mask)[0]
# Low Precision Math Reference
out_lp_ref = torch.ops.aten._scaled_dot_product_attention_math(
query, key, value, attn_mask,
dropout_p=dropout_p, is_causal=is_causal, scale=scale,
dropout_mask=dropout_mask)[0]
upstream_grad = torch.rand_like(out, requires_grad=False)
grads = torch.autograd.grad(out, (query, key, value, attn_mask), upstream_grad)
grads_ref_lp = torch.autograd.grad(out_lp_ref, (query, key, value, attn_mask), upstream_grad)
grads_ref = torch.autograd.grad(out_ref, (query_ref, key_ref, value_ref, attn_mask_ref), upstream_grad)
fudge_factors = {
"out": 4,
"grad_query": 150.0,
"grad_key": 25.0,
"grad_value": 8.0,
"grad_attn_mask": 45.0,
}
if TEST_WITH_ROCM:
fudge_factors['grad_key'] = 45.0
fudge_factors['grad_query'] = 360.0
if seq_len_k >= 1024:
fudge_factors['grad_key'] = 70.0
if seq_len_k >= 2048:
fudge_factors['grad_key'] = 160.0
fudge_factors['grad_query'] = 650.0
if dtype == torch.float32:
fudge_factors['grad_key'] = 90.0
check_out_and_grad(
(out_ref, out_lp_ref, out),
*zip(grads_ref, grads_ref_lp, grads),
fudge_factors=fudge_factors,
)
@unittest.skipIf(not PLATFORM_SUPPORTS_FLASH_ATTENTION, "Does not support SDPA or pre-SM80 hardware")
@unittest.skipIf(IS_JETSON, "causing sigkill on Jetson")
@parametrize("batch_size", [1, 8])
@parametrize("seq_len_q", [4, 143, 2048])
@parametrize("seq_len_k", [4, 127, 579, 2048])
@parametrize("head_dim", [8, 203, 256])
@parametrize("is_causal", [True, False])
@parametrize("dropout_p", [0.0, 0.22, 0.48])
@parametrize("dtype", [torch.float16, torch.bfloat16])
@parametrize("scale", [None, "l1"])
@parametrize("enable_gqa", [True, False] if not TEST_WITH_ROCM else [False])
@parametrize("n_heads", [[16, 8], [10, 2]])
def test_flash_attention_vs_math_ref_grads(self, device, batch_size: int, seq_len_q: int, seq_len_k: int,
head_dim: int, is_causal: bool, dropout_p: float, dtype: torch.dtype,
scale: str, enable_gqa: bool, n_heads: List[int]):
if isSM8XDevice and head_dim in range(193, 256 + 1):
self.skipTest("Flash attention on sm86, sm87, and sm89 for headdim > 192 currently disabled")
if is_causal and seq_len_q != seq_len_k:
self.skipTest("Flash V2 does not accept is_casual when seq_len_q != seq_len_k")
if TEST_WITH_ROCM and seq_len_q >= 1024 and seq_len_k >= 1024 and batch_size > 1:
torch.cuda.empty_cache() # Prevent memory fragmentation
if max(seq_len_q, seq_len_k) >= 2048 and torch.cuda.get_device_properties('cuda').total_memory < 40 * 2**30:
unittest.skip("Reference implementation OOM")
return
scale = scale if scale is None else (1 / head_dim)
num_heads_q = num_heads_kv = 4
if enable_gqa:
num_heads_q = n_heads[0]
num_heads_kv = n_heads[1]
query = torch.rand(batch_size, num_heads_q, seq_len_q, head_dim,
device=device, dtype=dtype, requires_grad=True)
key = torch.rand(batch_size, num_heads_kv, seq_len_k, head_dim, device=device,
dtype=dtype, requires_grad=True)
value = torch.rand(batch_size, num_heads_kv, seq_len_k, head_dim,
device=device, dtype=dtype, requires_grad=True)
higher_precision_dtype = torch.float64 if dtype == torch.float32 else torch.float32
query_ref, key_ref, value_ref = query_key_value_clones(query, key, value, dtype=higher_precision_dtype)
is_dropout = dropout_p > 0.0
if not is_dropout:
with sdpa_kernel(backends=[SDPBackend.FLASH_ATTENTION]):
out = F.scaled_dot_product_attention(
query, key, value, dropout_p=dropout_p, is_causal=is_causal, scale=scale, enable_gqa=enable_gqa)
with sdpa_kernel(backends=[SDPBackend.MATH]):
# High Precision Math Reference
out_ref = F.scaled_dot_product_attention(
query_ref, key_ref, value_ref, is_causal=is_causal, scale=scale, enable_gqa=enable_gqa)
# Low Precision Math Reference
out_lp_ref = F.scaled_dot_product_attention(
query, key, value, is_causal=is_causal, scale=scale, enable_gqa=enable_gqa)
else:
# Problem: we pad sizes in the composite region of the top-level SDPA, but we need the
# debug mask when we have dropout, so we manually pad up here when testing dropout
q_padded, q_og_size = pad_last_dim(query, 8)
k_padded, k_og_size = pad_last_dim(key, 8)
v_padded, v_og_size = pad_last_dim(value, 8)
# scale needs to be calculated on the og head_size
if scale is None:
scale = 1 / math.sqrt(q_og_size)
output_tuple = torch.ops.aten._scaled_dot_product_flash_attention(
q_padded, k_padded, v_padded, dropout_p=dropout_p, is_causal=is_causal, scale=scale, return_debug_mask=is_dropout)
out = output_tuple[0]
out = out[..., :v_og_size]
# Build dropout_mask
dbug_mask = output_tuple[-1]
query_padding_mask = torch.ones(
batch_size, seq_len_q, device=device, dtype=torch.bool)
key_padding_mask = torch.ones(
batch_size, seq_len_k, device=device, dtype=torch.bool)
softmax_mask = self.convert_flash_attn_S_to_softmax(
dbug_mask, seq_len_q, seq_len_k, query_padding_mask, key_padding_mask,
causal=is_causal)[:, :, :seq_len_q, :seq_len_k]
dropout_mask = softmax_mask >= 0
# High Precision Math Reference
out_ref = torch.ops.aten._scaled_dot_product_attention_math(
query_ref, key_ref, value_ref, dropout_p=dropout_p, is_causal=is_causal,
scale=scale, dropout_mask=dropout_mask, enable_gqa=enable_gqa)[0]
# Low Precision Math Reference
out_lp_ref = torch.ops.aten._scaled_dot_product_attention_math(
query, key, value, dropout_p=dropout_p, is_causal=is_causal, scale=scale,
dropout_mask=dropout_mask, enable_gqa=enable_gqa)[0]
upstream_grad = torch.rand_like(out, requires_grad=False)
# backward for flash attention on sm86, sm87, and sm89 for headdim >= 193 currently disabled
if isSM8XDevice and head_dim in range(193, 256):
self.assertRaises(RuntimeError, lambda: out.backward(upstream_grad))
return
grads = torch.autograd.grad(out, (query, key, value), upstream_grad)
grads_ref_lp = torch.autograd.grad(out_lp_ref, (query, key, value), upstream_grad)
grads_ref = torch.autograd.grad(out_ref, (query_ref, key_ref, value_ref), upstream_grad)
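        # Compare the fused kernel against both math references; the fudge factors scale the
        # default dtype tolerances and are loosened further for configurations known to be noisier.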
fudge_factors = {
'out': 4,
'grad_query': 160.0,
'grad_key': 16,
'grad_value': 4,
}
if TEST_WITH_ROCM:
fudge_factors['grad_key'] = 45.0
fudge_factors['grad_query'] = 360.0
if seq_len_k >= 1024:
fudge_factors['grad_key'] = 70.0
if seq_len_k >= 2048:
fudge_factors['grad_key'] = 190.0
fudge_factors['grad_query'] = 650.0
if seq_len_q >= 2048:
fudge_factors['grad_query'] = 1100.0
if dtype == torch.float32:
fudge_factors['grad_key'] = 90.0
check_out_and_grad(
(out_ref, out_lp_ref, out),
*zip(grads_ref, grads_ref_lp, grads),
fudge_factors=fudge_factors,
)
@unittest.skipIf(not PLATFORM_SUPPORTS_FLASH_ATTENTION, "Does not support SDPA or pre-SM80 hardware")
@parametrize("batch_size", [1, 8])
@parametrize("seq_len_q", [256, 1024])
@parametrize("seq_len_k", [256, 1024])
@parametrize("head_dim", [32, 64])
@parametrize("is_causal", [True, False])
@parametrize("dropout_p", [0.0, 0.22])
@parametrize("dtype", [torch.float16])
@parametrize("scale", [None, "l1"])
@parametrize("fused_kernel", PLATFORM_SPECIFIC_SDPA)
def test_fused_attention_vs_math_ref_grads_cudagraph(self, device, batch_size: int,
seq_len_q: int, seq_len_k: int,
head_dim: int,
is_causal: bool,
dropout_p: float,
dtype: torch.dtype,
scale: str,
fused_kernel: SDPBackend):
def _get_mem_eff_drop_mask(batch_size, n_heads, q_len, kv_len, dropout_p, seed, offset, device=device):
mask = torch.empty((batch_size, n_heads, q_len, kv_len), device=device, dtype=torch.float32)
rand_uniform = torch._fill_mem_eff_dropout_mask_(mask, dropout_p, seed, offset)
mask = (rand_uniform > dropout_p).to(torch.float32)
return mask
def get_dropout_mask(output, fused_kernel, batch_size, n_heads, q_len, kv_len, dropout_p, device=device):
if fused_kernel == SDPBackend.EFFICIENT_ATTENTION:
output_seed, output_offset = output_tuple[2], output_tuple[3]
output_seed = output_seed.item()
output_offset = output_offset.item()
return _get_mem_eff_drop_mask(batch_size, n_heads, q_len, kv_len,
dropout_p, output_seed, output_offset, device=device)
else:
# Build dropout_mask
dbug_mask = output_tuple[-1]
query_padding_mask = torch.ones(
batch_size, seq_len_q, device=device, dtype=torch.bool)
key_padding_mask = torch.ones(
batch_size, seq_len_k, device=device, dtype=torch.bool)
softmax_mask = self.convert_flash_attn_S_to_softmax(
dbug_mask, seq_len_q, seq_len_k, query_padding_mask, key_padding_mask,
causal=is_causal)[:, :, :seq_len_q, :seq_len_k]
dropout_mask = softmax_mask >= 0
return dropout_mask
if fused_kernel == SDPBackend.FLASH_ATTENTION and is_causal and seq_len_q != seq_len_k:
self.skipTest("Flash V2 does not accept is_casual when seq_len_q != seq_len_k")
if TEST_WITH_ROCM and is_causal and seq_len_q != seq_len_k:
self.skipTest("ROCm does not accept is_casual when seq_len_q != seq_len_k")
seed = 42
n_heads = 4
query = torch.rand(batch_size, n_heads, seq_len_q, head_dim,
device=device, dtype=dtype, requires_grad=True)
key = torch.rand(batch_size, n_heads, seq_len_k, head_dim, device=device,
dtype=dtype, requires_grad=True)
value = torch.rand(batch_size, n_heads, seq_len_k, head_dim,
device=device, dtype=dtype, requires_grad=True)
fused_op = (torch.ops.aten._scaled_dot_product_efficient_attention
if fused_kernel == SDPBackend.EFFICIENT_ATTENTION else torch.ops.aten._scaled_dot_product_flash_attention
if fused_kernel == SDPBackend.FLASH_ATTENTION else torch.ops.aten._scaled_dot_product_cudnn_attention)
higher_precision_dtype = torch.float64 if dtype == torch.float32 else torch.float32
query_ref, key_ref, value_ref = query_key_value_clones(query, key, value, dtype=higher_precision_dtype)
# warmup
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
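        # Warm up on a side stream so lazy initialization (allocations, kernel selection, RNG
        # state) happens before the CUDA graph capture below.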
# Set the global seed before capture
torch.manual_seed(seed)
kwargs = {"dropout_p": dropout_p, "is_causal": is_causal}
if fused_kernel == SDPBackend.EFFICIENT_ATTENTION:
kwargs["compute_log_sumexp"] = True
kwargs["attn_bias"] = None
if fused_kernel == SDPBackend.FLASH_ATTENTION:
kwargs['return_debug_mask'] = dropout_p > 0.0
if fused_kernel == SDPBackend.CUDNN_ATTENTION:
kwargs["compute_log_sumexp"] = True
kwargs["attn_bias"] = None
if "return_debug_mask" in kwargs:
kwargs.pop("return_debug_mask")
with torch.cuda.stream(s):
# Create real output
output_tuple = fused_op(query, key, value, **kwargs)
torch.cuda.current_stream().wait_stream(s)
out = output_tuple[0]
upstream_grad = torch.rand_like(out, requires_grad=False)
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
out.backward(upstream_grad)
for x in (query, key, value):
x.grad = None
g = torch.cuda.CUDAGraph()
# Create real output
with torch.cuda.graph(g):
tmp = torch.rand_like(query, device=query.device) # test non-zero intragraph offset
# Create real output
output_tuple = fused_op(query, key, value, **kwargs)
assert all(not isinstance(o, torch.Tensor) or o.is_cuda for o in output_tuple)
g.replay()
out_first = output_tuple[0].clone()
g.replay()
out = output_tuple[0]
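        # With dropout disabled the two replays must match exactly; with dropout enabled each
        # replay advances the captured RNG offset, so the outputs are expected to differ.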
if dropout_p == 0.0:
self.assertEqual(out_first, out, atol=0, rtol=0)
else:
# replays produce different results
self.assertNotEqual(out_first, out)
with sdpa_kernel(backends=[SDPBackend.MATH]):
if dropout_p == 0.0:
# High Precision Math Reference
out_ref = F.scaled_dot_product_attention(query_ref, key_ref, value_ref,
dropout_p=dropout_p, is_causal=is_causal)
# Low Precision Math Reference
out_lp_ref = F.scaled_dot_product_attention(query, key, value,
dropout_p=dropout_p, is_causal=is_causal)
# cuDNN attention doesn't support returning dropout mask
elif fused_kernel != SDPBackend.CUDNN_ATTENTION:
# Create the dropout_mask
dropout_mask = get_dropout_mask(output_tuple, fused_kernel, batch_size,
n_heads, seq_len_q, seq_len_k, dropout_p, device)
# High Precision Math Reference
out_ref = torch.ops.aten._scaled_dot_product_attention_math(
query_ref, key_ref, value_ref, dropout_p=dropout_p, is_causal=is_causal,
dropout_mask=dropout_mask)[0]
# Low Precision Math Reference
out_lp_ref = torch.ops.aten._scaled_dot_product_attention_math(
query, key, value, dropout_p=dropout_p, is_causal=is_causal,
dropout_mask=dropout_mask)[0]
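        # Capture the backward pass in a second CUDA graph and replay it to produce the gradients.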
g1 = torch.cuda.CUDAGraph()
with torch.cuda.graph(g1):
grads = torch.autograd.grad(out, (query, key, value), upstream_grad)
g1.replay()
if fused_kernel != SDPBackend.CUDNN_ATTENTION or dropout_p == 0.0:
grads_ref_lp = torch.autograd.grad(out_lp_ref, (query, key, value), upstream_grad)
grads_ref = torch.autograd.grad(out_ref, (query_ref, key_ref, value_ref), upstream_grad)
check_out_and_grad(
(out_ref, out_lp_ref, out),
*zip(grads_ref, grads_ref_lp, grads),
fudge_factors={
'out': 3.0,
'grad_query': 100.0,
'grad_key': 8.0,
'grad_value': 3.0,
}
)
@skipIfRocm # Nested Tensor
@unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_ATTENTION, "Fused SDPA was not built for this system")
@parametrize("fused_kernel", [SDPBackend.FLASH_ATTENTION, SDPBackend.EFFICIENT_ATTENTION] if
PLATFORM_SUPPORTS_FLASH_ATTENTION else [SDPBackend.EFFICIENT_ATTENTION])
def test_fused_kernels_seq_len_1_inputs(self, device, fused_kernel):
rand_nested_tensor = partial(rand_sdpa_tensor, type="nested", device=device, dtype=torch.float16)
batch, num_heads, head_dim = 32, 16, 64
seq_lens = torch.randint(low=1, high=32, size=(batch,))
# make sure some seq_lens are 1
num_ones = 10
indices = torch.randint(low=0, high=batch, size=(num_ones,))
seq_lens.scatter_(0, indices, 1)
shape = SdpaShape(batch, num_heads, seq_lens.tolist(), head_dim)
query = rand_nested_tensor(shape)
key = rand_nested_tensor(shape)
value = rand_nested_tensor(shape)
query = query.transpose(1, 2)
key = key.transpose(1, 2)
value = value.transpose(1, 2)
with sdpa_kernel(backends=[fused_kernel]):
actual = torch.nn.functional.scaled_dot_product_attention(
query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False)
with sdpa_kernel(backends=[SDPBackend.MATH]):
math_ref = torch.nn.functional.scaled_dot_product_attention(
query.contiguous().to(torch.float32),
key.contiguous().to(torch.float32),
value.contiguous().to(torch.float32),
attn_mask=None, dropout_p=0.0, is_causal=False)
self.assertEqual(actual.contiguous(), math_ref.contiguous().to(torch.float16), atol=1e-3, rtol=1e-2)
@skipIfRocm # Nested tensor
@unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_ATTENTION, "Fused SDPA was not built for this system")
@parametrize("kernel", [SDPBackend.FLASH_ATTENTION, SDPBackend.EFFICIENT_ATTENTION] if
PLATFORM_SUPPORTS_FLASH_ATTENTION else [SDPBackend.EFFICIENT_ATTENTION])
@parametrize("expand_q_batch", [True, False])
@parametrize("expand_k_batch", [True, False])
@parametrize("expand_v_batch", [True, False])
@parametrize("expand_q_num_heads", [True, False])
@parametrize("expand_k_num_heads", [True, False])
@parametrize("expand_v_num_heads", [True, False])
def test_fused_kernels_nested_broadcasting(
self,
device,
kernel,
expand_q_batch,
expand_k_batch,
expand_v_batch,
expand_q_num_heads,
expand_k_num_heads,
expand_v_num_heads,
):
is_efficient = kernel == SDPBackend.EFFICIENT_ATTENTION
dtype = torch.float32 if is_efficient else torch.float16
rand_nested_tensor = partial(rand_sdpa_tensor, type="nested", device=device, dtype=dtype)
batch, num_heads, head_dim = 32, 8, 64
head_dim_v = 32 if is_efficient else head_dim
seq_lens_q = (torch.randint(low=1, high=5, size=(1,)).item()
if expand_q_batch
else torch.randint(low=1, high=32, size=(batch,)).tolist())
seq_lens_kv = (torch.randint(low=1, high=5, size=(1,)).item()
if (expand_k_batch or expand_v_batch)
else torch.randint(low=1, high=32, size=(batch,)).tolist())
batch_q = 1 if expand_q_batch else batch
batch_k = 1 if expand_k_batch else batch
batch_v = 1 if expand_v_batch else batch
# handle case where all batch_sizes are 1
batch = max(batch_q, batch_k, batch_v)
num_heads_q = 1 if expand_q_num_heads else num_heads
num_heads_k = 1 if expand_k_num_heads else num_heads
num_heads_v = 1 if expand_v_num_heads else num_heads
# handle case where all num_heads are 1
num_heads = max(num_heads_q, num_heads_k, num_heads_v)
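        # Each expand_* flag collapses that dimension to 1 on one of q/k/v; _broadcast below
        # re-expands the inputs so the dense math reference sees logically identical tensors.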
q_shape = SdpaShape(batch_q, num_heads_q, seq_lens_q, head_dim)
k_shape = SdpaShape(batch_k, num_heads_k, seq_lens_kv, head_dim)
v_shape = SdpaShape(batch_v, num_heads_v, seq_lens_kv, head_dim_v)
query = rand_nested_tensor(q_shape)
key = rand_nested_tensor(k_shape)
value = rand_nested_tensor(v_shape)
def _broadcast(t, batch_broadcasted, num_heads_broadcasted):
if batch_broadcasted and num_heads_broadcasted:
# (1, seq_len, 1, head_dim) -> (batch, seq_len, num_heads, head_dim)
result = torch.nested.nested_tensor(
[t[0].expand(-1, num_heads, t.size(-1)) for _ in range(batch)], dtype=torch.float32)
elif batch_broadcasted:
# (1, seq_len, num_heads, head_dim) -> (batch, seq_len, num_heads, head_dim)
result = torch.nested.nested_tensor([t[0] for _ in range(batch)], dtype=torch.float32)
elif num_heads_broadcasted:
# (batch, seq_len, 1, head_dim) -> (batch, seq_len, num_heads, head_dim)
result = torch.nested.nested_tensor([x.expand(-1, num_heads, t.size(-1))
for x in t.unbind()], dtype=torch.float32)
else:
result = t.to(torch.float32)
return result
query_expanded = _broadcast(query, expand_q_batch, expand_q_num_heads).transpose(1, 2)
key_expanded = _broadcast(key, expand_k_batch, expand_k_num_heads).transpose(1, 2)
value_expanded = _broadcast(value, expand_v_batch, expand_v_num_heads).transpose(1, 2)
query = query.transpose(1, 2)
key = key.transpose(1, 2)
value = value.transpose(1, 2)
with sdpa_kernel(backends=[kernel]):
actual = torch.nn.functional.scaled_dot_product_attention(
query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False)
with sdpa_kernel(backends=[SDPBackend.MATH]):
math_ref = torch.nn.functional.scaled_dot_product_attention(
query_expanded.contiguous(), key_expanded.contiguous(), value_expanded.contiguous(),
attn_mask=None, dropout_p=0.0, is_causal=False)
self.assertEqual(actual.contiguous(), math_ref.contiguous().to(dtype), atol=1.5e-3, rtol=1e-2)
@skipIfRocm # Nested tensor
@unittest.skipIf(not PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, "Fused SDPA was not built for this system")
def test_fused_kernels_nested_broadcasting_query_dense(self, device):
rand_nested_tensor = partial(rand_sdpa_tensor, type="nested", device=device, dtype=torch.float32)
batch, num_heads, head_dim, head_dim_v = 32, 16, 64, 96
seq_lens = torch.randint(low=1, high=32, size=(batch,)).tolist()
q_shape = (1, 1, num_heads, head_dim)
k_shape = SdpaShape(batch, num_heads, seq_lens, head_dim)
v_shape = SdpaShape(batch, 1, seq_lens, head_dim_v)
# create a dense query
query = torch.randn(q_shape, device=device, dtype=torch.float32)
key = rand_nested_tensor(k_shape)
value = rand_nested_tensor(v_shape)
# (1, 1, num_heads, head_dim) -> (batch, 1, num_heads, head_dim)
query_expanded = torch.nested.nested_tensor([query.squeeze(0) for _ in range(batch)]).transpose(1, 2)
# (batch, seq_lens, 1, head_dim) -> (batch, seq_lens, num_heads, head_dim)
value_expanded = torch.nested.nested_tensor(
[t.expand(-1, num_heads, head_dim_v) for t in value.unbind()]).transpose(1, 2)
query = query.transpose(1, 2)
key = key.transpose(1, 2)
value = value.transpose(1, 2)
with sdpa_kernel(backends=[SDPBackend.EFFICIENT_ATTENTION]):
actual = torch.nn.functional.scaled_dot_product_attention(
query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False)
with sdpa_kernel(backends=[SDPBackend.MATH]):
math_ref = torch.nn.functional.scaled_dot_product_attention(
query_expanded.contiguous(), key.contiguous(), value_expanded.contiguous(),
attn_mask=None, dropout_p=0.0, is_causal=False)
self.assertEqual(actual.contiguous(), math_ref.contiguous(), atol=1e-3, rtol=1e-2)
@skipIfRocm # Nested tensor
@unittest.skipIf(not PLATFORM_SUPPORTS_FLASH_ATTENTION, "Does not support SDPA or pre-SM80 hardware")
@parametrize("batch_size", [8, 32])
@parametrize("max_seq_len_q", [32, 256])
@parametrize("max_seq_len_kv", [32, 256])
@parametrize("head_dim", [8, 64])
@parametrize("dropout_p", [0.0, 0.1])
@parametrize("dtype", [torch.float16])
@parametrize("scale", [None, "l1"])
@parametrize("is_causal", [True, False])
def test_flash_attention_vs_math_ref_grads_nestedtensor(self, device, batch_size: int, max_seq_len_q: int, max_seq_len_kv: int,
head_dim: int, dropout_p: float, dtype: torch.dtype,
scale: str, is_causal: bool):
if is_causal:
# TODO we should support this
self.assertRaisesRegex(RuntimeError, "Nested tensors for query / key are not supported when is_causal=True")
return
scale = scale if scale is None else (1 / head_dim)
n_heads = 4
seq_lens_q = torch.randint(low=1, high=max_seq_len_q, size=(batch_size,))
# Set one entry to max length
seq_lens_q[torch.randint(0, batch_size, size=(1,))] = max_seq_len_q
seq_lens_kv = torch.randint(low=1, high=max_seq_len_kv, size=(batch_size,))
seq_lens_kv[torch.randint(0, batch_size, size=(1,))] = max_seq_len_kv
def rand_nt(sequence_list, num_heads, head_dim):
tensors = [torch.rand((num_heads, seq_len, head_dim)) for seq_len in sequence_list]
return torch.nested.nested_tensor(tensors, requires_grad=True, device=device, dtype=dtype)
query = rand_nt(seq_lens_q, n_heads, head_dim)
key = rand_nt(seq_lens_kv, n_heads, head_dim)
value = rand_nt(seq_lens_kv, n_heads, head_dim)
# Run the math kernel on low precision references
query_ref_lp = query.clone().detach().requires_grad_(True)
key_ref_lp = key.clone().detach().requires_grad_(True)
value_ref_lp = value.clone().detach().requires_grad_(True)
query_ref = query.clone().detach().to(torch.float32).requires_grad_(True)
key_ref = key.clone().detach().to(torch.float32).requires_grad_(True)
value_ref = value.clone().detach().to(torch.float32).requires_grad_(True)
is_dropout = dropout_p > 0.0
if not is_dropout:
with sdpa_kernel(backends=[SDPBackend.FLASH_ATTENTION]):
out = F.scaled_dot_product_attention(query, key, value, dropout_p=dropout_p, is_causal=is_causal, scale=scale)
with sdpa_kernel(backends=[SDPBackend.MATH]):
# High Precision Math Reference
out_ref = F.scaled_dot_product_attention(
query_ref, key_ref, value_ref, is_causal=is_causal, scale=scale)
# Low Precision Math Reference
out_lp_ref = F.scaled_dot_product_attention(
query_ref_lp, key_ref_lp, value_ref_lp, is_causal=is_causal, scale=scale)
else:
# Create real output
output_tuple = torch.ops.aten._scaled_dot_product_flash_attention(
query, key, value, dropout_p=dropout_p, is_causal=is_causal,
scale=scale, return_debug_mask=is_dropout)
out = output_tuple[0]
dbug_mask = output_tuple[-1]
query_padding_mask = torch.arange(max_seq_len_q).unsqueeze(0).expand(
batch_size, max_seq_len_q
) < seq_lens_q.unsqueeze(-1)
query_padding_mask = query_padding_mask.to("cuda")
key_padding_mask = torch.arange(max_seq_len_kv).unsqueeze(0).expand(
batch_size, max_seq_len_kv
) < seq_lens_kv.unsqueeze(-1)
key_padding_mask = key_padding_mask.to("cuda")
softmax_mask = self.convert_flash_attn_S_to_softmax(
dbug_mask, max_seq_len_q, max_seq_len_kv, query_padding_mask, key_padding_mask, causal=is_causal)
dropout_mask = softmax_mask >= 0
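            # The debug mask is padded to the max sequence lengths, so slice each batch entry back
            # to its true (seq_len_q, seq_len_kv) extent before packing it into a nested tensor.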
nt_stack = []
for tensor_component in range(batch_size):
batch_stack = []
for head in range(n_heads):
batch_stack.append(dropout_mask[tensor_component, head,
0:seq_lens_q[tensor_component],
0:seq_lens_kv[tensor_component]].unsqueeze(0))
nt_stack.append(torch.cat(batch_stack))
nested_dropout_mask = torch.nested.nested_tensor(nt_stack)
# High Precision Math Reference
out_ref = torch.ops.aten._scaled_dot_product_attention_math(
query_ref, key_ref, value_ref, dropout_p=dropout_p,
is_causal=is_causal, scale=scale, dropout_mask=nested_dropout_mask)[0]
# Low Precision Math Reference
out_lp_ref = torch.ops.aten._scaled_dot_product_attention_math(
query_ref_lp, key_ref_lp, value_ref_lp, dropout_p=dropout_p, is_causal=is_causal, scale=scale,
dropout_mask=nested_dropout_mask)[0]
upstream_grad = out.detach().clone().contiguous()
out.backward(upstream_grad)
out_ref.backward(upstream_grad.to(out_ref.dtype))
out_lp_ref.backward(upstream_grad.to(out_lp_ref.dtype))
dropout_fudge_factor = 1.0 if dropout_p == 0.0 else 2.0
check_out_and_grad(
(out_ref, out_lp_ref, out),
(query_ref, query_ref_lp, query),
(key_ref, key_ref_lp, key),
(value_ref, value_ref_lp, value),
fudge_factors={
'out': 1.5 * dropout_fudge_factor,
'grad_query': 12.0 * dropout_fudge_factor,
'grad_key': 1.5 * dropout_fudge_factor,
'grad_value': 2.0 * dropout_fudge_factor,
}
)
class TestAttnBias(NNTestCase):
def run_test(
self,
device,
make_q,
make_kv,
attn_bias=None,
forw_tolerances: Optional[Tolerances] = None,
grad_tolerances: Optional[Tolerances] = None,
backend=None,
causal_variant=None,
):
if backend is not None:
torch._dynamo.reset()
query, key, value = make_q(), make_kv(), make_kv()
query_prototype, key_prototype, value_prototype = query_key_value_clones(query, key, value)
realized = attn_bias._materialize(device) if attn_bias is not None else None
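        # The reference path consumes the materialized dense mask, while the path under test gets
        # the CausalBias object itself so SDPA can dispatch to a causal fast path when available.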
pytorch_output = scaled_dot_product_attention(
query, key, value, attn_mask=realized, dropout_p=0.0, is_causal=False
)
sdpa_op = (
torch.compile(scaled_dot_product_attention, backend=backend)
if backend is not None
else scaled_dot_product_attention
)
sdpa_output = sdpa_op(
query_prototype,
key_prototype,
value_prototype,
attn_mask=attn_bias,
dropout_p=0.0,
is_causal=False,
scale=None,
)
dOut = torch.randn_like(pytorch_output)
pytorch_output.backward(dOut)
sdpa_output.backward(dOut)
# Use default assert_close tolerances for dtypes
if forw_tolerances is None:
forw_tolerances = Tolerances(atol=None, rtol=None)
if grad_tolerances is None:
grad_tolerances = Tolerances(atol=None, rtol=None)
torch.testing.assert_close(pytorch_output, sdpa_output, rtol=forw_tolerances.rtol, atol=forw_tolerances.atol)
torch.testing.assert_close(query.grad, query_prototype.grad, rtol=grad_tolerances.rtol, atol=grad_tolerances.atol)
torch.testing.assert_close(key.grad, key_prototype.grad, rtol=grad_tolerances.rtol, atol=grad_tolerances.atol)
torch.testing.assert_close(value.grad, value_prototype.grad, rtol=grad_tolerances.rtol, atol=grad_tolerances.atol)
@skipIfRocm # No support for the second variant for now
@parametrize("causal_variant", [CausalVariant.UPPER_LEFT, CausalVariant.LOWER_RIGHT])
@parametrize(
"shape",
[(16, 16, 128, 128, 16), (16, 16, 128, 256, 32), (16, 16, 256, 128, 32), (1, 1, 23, 56, 15)],
)
def test_causal_variants(self, device, causal_variant: CausalVariant, shape: List[Tuple[int]]):
make_tensor = partial(
torch.rand, device=device, dtype=torch.float16, requires_grad=True
)
bsz, num_heads, seq_len_q, seq_len_kv, head_dim = shape
make_q_tensor = partial(make_tensor, SdpaShape(bsz, num_heads, seq_len_q, head_dim))
make_kv_tensor = partial(make_tensor, SdpaShape(bsz, num_heads, seq_len_kv, head_dim))
if causal_variant == CausalVariant.LOWER_RIGHT and seq_len_q > seq_len_kv:
self.skipTest(
"Lower right causal mask will produce NaNs in the output when seq_len_q > seq_len_kv!"
)
forw_tol = Tolerances(1e-3, 1e-3)
grad_tol = Tolerances(5e-3, 5e-3)
if causal_variant == CausalVariant.UPPER_LEFT:
attn_bias = causal_upper_left(seq_len_q, seq_len_kv)
else:
attn_bias = causal_lower_right(seq_len_q, seq_len_kv)
with sdpa_kernel(backends=[SDPBackend.EFFICIENT_ATTENTION,
SDPBackend.FLASH_ATTENTION,
SDPBackend.MATH,
SDPBackend.CUDNN_ATTENTION]):
self.run_test(device, make_q_tensor, make_kv_tensor, attn_bias, forw_tol, grad_tol, backend=None)
@skipIfRocm # CausalVariant
@parametrize("causal_variant", [CausalVariant.UPPER_LEFT, CausalVariant.LOWER_RIGHT])
@parametrize(
"shape",
[(16, 16, 128, 128, 16), (16, 16, 128, 256, 32), (16, 16, 256, 128, 32), (1, 1, 23, 56, 15)],
)
@unittest.skipIf(IS_WINDOWS, "torch.compile is not supported on windows")
@skipIfTorchDynamo("This function already calls torch.compile.")
def test_causal_variants_compile(self, device, causal_variant: CausalVariant, shape: List[Tuple[int]]):
cnts = CompileCounterWithBackend("aot_eager")
make_tensor = partial(
torch.rand, device=device, dtype=torch.float16, requires_grad=True
)
bsz, num_heads, seq_len_q, seq_len_kv, head_dim = shape
make_q_tensor = partial(make_tensor, SdpaShape(bsz, num_heads, seq_len_q, head_dim))
make_kv_tensor = partial(make_tensor, SdpaShape(bsz, num_heads, seq_len_kv, head_dim))
if causal_variant == CausalVariant.LOWER_RIGHT and seq_len_q > seq_len_kv:
self.skipTest(
"Lower right causal mask will produce NaNs in the output when seq_len_q > seq_len_kv!"
)
forw_tol = Tolerances(1e-3, 1e-3)
grad_tol = Tolerances(5e-3, 5e-3)
if causal_variant == CausalVariant.UPPER_LEFT:
attn_bias = causal_upper_left(seq_len_q, seq_len_kv)
else:
attn_bias = causal_lower_right(seq_len_q, seq_len_kv)
with sdpa_kernel(backends=[SDPBackend.EFFICIENT_ATTENTION,
SDPBackend.FLASH_ATTENTION,
SDPBackend.MATH,
SDPBackend.CUDNN_ATTENTION]):
self.run_test(device, make_q_tensor, make_kv_tensor, attn_bias, forw_tol, grad_tol, backend=cnts)
self.assertEqual(cnts.frame_count, 1, "Compiled graph should have 1 frame!")
@skipIfRocm
@parametrize("shape", [(16, 16, 128, 128, 16), (16, 16, 128, 256, 32), (16, 16, 256, 128, 32), (1, 1, 23, 56, 15)])
def test_is_causal_equals_upper_left(self, device, shape: List[Tuple[int]]):
make_tensor = partial(
torch.rand, device=device, dtype=torch.float16, requires_grad=True
)
bsz, num_heads, seq_len_q, seq_len_kv, head_dim = shape
make_q_tensor = partial(make_tensor, SdpaShape(bsz, num_heads, seq_len_q, head_dim))
make_kv_tensor = partial(make_tensor, SdpaShape(bsz, num_heads, seq_len_kv, head_dim))
forw_tol = Tolerances(1e-3, 1e-3)
grad_tol = Tolerances(5e-3, 5e-3)
query = make_q_tensor()
key = make_kv_tensor()
value = make_kv_tensor()
attn_bias = causal_upper_left(seq_len_q, seq_len_kv)
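        # is_causal=True corresponds to an upper-left aligned causal mask, so both calls should match.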
out_attn_bias = scaled_dot_product_attention(query, key, value, attn_mask=attn_bias, dropout_p=0.0)
out_is_causal = scaled_dot_product_attention(query, key, value, is_causal=True, dropout_p=0.0)
torch.testing.assert_close(out_attn_bias, out_is_causal, rtol=forw_tol.rtol, atol=forw_tol.atol)
def test_is_causal_and_mask_fails(self, device):
make_tensor = partial(
torch.rand, device=device, dtype=torch.float16, requires_grad=True
)
make_q_tensor = partial(make_tensor, SdpaShape(16, 16, 128, 16))
make_kv_tensor = partial(make_tensor, SdpaShape(16, 16, 128, 16))
query = make_q_tensor()
key = make_kv_tensor()
value = make_kv_tensor()
attn_bias = causal_upper_left(128, 128)
with self.assertRaisesRegex(ValueError, "CausalBias should not be used with causal=True"):
scaled_dot_product_attention(query, key, value, attn_mask=attn_bias, is_causal=True, dropout_p=0.0)
@unittest.skipIf(TEST_XPU, "XPU does not support cppextension currently")
@unittest.skipIf(IS_FBCODE, "Ninja is required to load C++ extensions and it's not compatible with Buck ")
class TestSDPAPrivateUse1Only(NNTestCase):
@classmethod
def setUpClass(cls):
remove_build_path()
cls.module = torch.utils.cpp_extension.load(
name="custom_device_extension",
sources=[
f"{'test/' if not os.getcwd().endswith('test') else ''}cpp_extensions/open_registration_extension.cpp",
],
extra_include_paths=["cpp_extensions"],
extra_cflags=["-g"],
verbose=True,
)
# register torch.foo module and foo device to torch
torch.utils.rename_privateuse1_backend("foo")
torch.utils.generate_methods_for_privateuse1_backend(for_storage=True)
torch._register_device_module("foo", generate_faked_module())
@skipIfTorchDynamo()
def test_fused_sdp_choice_privateuseone(self):
batch_size, seq_len, num_heads, head_dim = 4, 256, 2, 128
make_tensor = partial(torch.rand, device="cpu", dtype=torch.float16)
shape = SdpaShape(batch_size, num_heads, seq_len, head_dim)
q_cpu, k_cpu, v_cpu = make_tensor(shape), make_tensor(shape), make_tensor(shape)
q_privateuse1 = q_cpu.to("foo")
k_privateuse1 = k_cpu.to("foo")
v_privateuse1 = v_cpu.to("foo")
assert torch._fused_sdp_choice(q_privateuse1, k_privateuse1, v_privateuse1) == SDPBackend.OVERRIDEABLE.value
def test_scaled_dot_product_fused_attention_overrideable(self):
batch_size, seq_len, num_heads, head_dim = 4, 256, 2, 128
make_tensor = partial(torch.rand, device="cpu", dtype=torch.float16)
shape = SdpaShape(batch_size, num_heads, seq_len, head_dim)
q_cpu, k_cpu, v_cpu = make_tensor(shape), make_tensor(shape), make_tensor(shape)
q_privateuse1 = q_cpu.to("foo")
k_privateuse1 = k_cpu.to("foo")
v_privateuse1 = v_cpu.to("foo")
actual = torch.nn.functional.scaled_dot_product_attention(
q_privateuse1, k_privateuse1, v_privateuse1, attn_mask=None, dropout_p=0.0)
def test_scaled_dot_product_fused_attention_overrideable_backward(self):
batch_size, seq_len, num_heads, head_dim = 4, 256, 2, 128
make_tensor = partial(torch.rand, device="cpu", dtype=torch.float16, requires_grad=True)
shape = (batch_size, num_heads, seq_len, head_dim)
q_cpu, k_cpu, v_cpu = make_tensor(shape), make_tensor(shape), make_tensor(shape)
attn_mask = make_tensor((batch_size, num_heads, seq_len, seq_len))
q_privateuse1 = q_cpu.to("foo")
k_privateuse1 = k_cpu.to("foo")
v_privateuse1 = v_cpu.to("foo")
attn_mask_privateuse1 = attn_mask.to("foo")
output, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, philox_seed, philox_offset, debug_attn_mask = \
torch.ops.aten._scaled_dot_product_fused_attention_overrideable(
q_privateuse1, k_privateuse1, v_privateuse1, attn_bias=attn_mask_privateuse1)
rand_upward = torch.rand(shape, device="cpu", dtype=torch.float16, requires_grad=False)
rand_upward_privateuse1 = rand_upward.to("foo")
grad_input_mask = [True, True, True, True]
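        # Request gradients for all four inputs (query, key, value, attn_bias) from the backward op.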
grad_q, grad_k, grad_v, grad_attn_mask = torch.ops.aten._scaled_dot_product_fused_attention_overrideable_backward(
rand_upward_privateuse1, q_privateuse1, k_privateuse1, v_privateuse1, attn_mask_privateuse1,
grad_input_mask, output, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p=0.0,
is_causal=False, philox_seed=philox_seed, philox_offset=philox_offset)
if NOTEST_CPU:
device_types = ("cuda", )
else:
device_types = ("cpu", "cuda")
instantiate_device_type_tests(TestTransformers, globals(), only_for=device_types)
instantiate_device_type_tests(TestSDPAFailureModes, globals(), only_for=device_types)
instantiate_device_type_tests(TestSDPA, globals(), only_for=device_types)
instantiate_device_type_tests(TestSDPACudaOnly, globals(), only_for=("cuda"))
instantiate_device_type_tests(TestSDPACpuOnly, globals(), only_for=("cpu"))
instantiate_device_type_tests(TestAttnBias, globals(), only_for=device_types)
if __name__ == '__main__':
run_tests()
|
import contextlib
from functools import partial
from collections import namedtuple
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import scaled_dot_product_attention
from torch.nn.attention import sdpa_kernel, SDPBackend
from torch.nn.attention.bias import CausalVariant, causal_lower_right, causal_upper_left
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
import itertools
import torch.optim as optim
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA, onlyCPU
from typing import List, Tuple, Optional, Dict
import torch.utils.cpp_extension
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
IS_FBCODE,
TEST_WITH_ROCM,
skipIfRocm,
skipIfTorchDynamo,
TEST_FAIRSEQ,
run_tests,
parametrize,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck,
make_tensor,
NOTEST_CPU,
IS_WINDOWS,
TEST_WITH_TORCHDYNAMO,
TEST_XPU,
)
from torch._dynamo.testing import CompileCounterWithBackend
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import (
IS_JETSON, SM80OrLater, PLATFORM_SUPPORTS_FLASH_ATTENTION,
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
PLATFORM_SUPPORTS_FUSED_ATTENTION,
PLATFORM_SUPPORTS_CUDNN_ATTENTION,
SM90OrLater,
tf32_on_and_off
)
from test_cpp_extensions_open_device_registration import (
remove_build_path,
generate_faked_module
)
import fairseq.models.transformer as fairseq_transformer
SdpaShape = namedtuple('Sdpa_Shape', ['batch', 'num_heads', 'seq_len', 'head_dim'])
Tolerances = namedtuple('Tolerances', ['atol', 'rtol'])
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM8XDevice = torch.cuda.is_available() and torch.cuda.get_device_capability() in [(8, 6), (8, 7), (8, 9)]
isSM90Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (9, 0)
isSM5xDevice = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] == 5
isLessThanSM80Device = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 8
PLATFORM_SPECIFIC_SDPA = get_platform_specific_sdpa()
MEM_EFF_CAPABILITY_MATCHES_SM80 = SM80OrLater or TEST_WITH_ROCM
from torch.nn.attention.bias import _calculate_scale
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_transformers.py
|
test_scaled_dot_product_attention_math_with_negative_scale
|
def test_scaled_dot_product_attention_math_with_negative_scale(self, device, kernel: SDPBackend):
# https://github.com/pytorch/pytorch/issues/105190.
def ref(x):
v1 = torch.matmul(x, x.transpose(-1, -2))
v2 = v1 / -0.0001
v3 = v2.softmax(dim=-1)
v4 = torch.matmul(v3, x)
return v4
x = torch.randn(1, 3, 64, 64, device=device)
ref_result = ref(x)
with sdpa_kernel(backends=[kernel]):
sdp_math = torch.nn.functional.scaled_dot_product_attention(x, x, x, scale=-1.0 / 0.0001)
self.assertEqual(ref_result, sdp_math)
|
import contextlib
from functools import partial
from collections import namedtuple
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import scaled_dot_product_attention
from torch.nn.attention import sdpa_kernel, SDPBackend
from torch.nn.attention.bias import CausalVariant, causal_lower_right, causal_upper_left
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
import itertools
import torch.optim as optim
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA, onlyCPU
from typing import List, Tuple, Optional, Dict
import torch.utils.cpp_extension
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
IS_FBCODE,
TEST_WITH_ROCM,
skipIfRocm,
skipIfTorchDynamo,
TEST_FAIRSEQ,
run_tests,
parametrize,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck,
make_tensor,
NOTEST_CPU,
IS_WINDOWS,
TEST_WITH_TORCHDYNAMO,
TEST_XPU,
)
from torch._dynamo.testing import CompileCounterWithBackend
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import (
IS_JETSON, SM80OrLater, PLATFORM_SUPPORTS_FLASH_ATTENTION,
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
PLATFORM_SUPPORTS_FUSED_ATTENTION,
PLATFORM_SUPPORTS_CUDNN_ATTENTION,
SM90OrLater,
tf32_on_and_off
)
from test_cpp_extensions_open_device_registration import (
remove_build_path,
generate_faked_module
)
import fairseq.models.transformer as fairseq_transformer
SdpaShape = namedtuple('Sdpa_Shape', ['batch', 'num_heads', 'seq_len', 'head_dim'])
Tolerances = namedtuple('Tolerances', ['atol', 'rtol'])
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM8XDevice = torch.cuda.is_available() and torch.cuda.get_device_capability() in [(8, 6), (8, 7), (8, 9)]
isSM90Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (9, 0)
isSM5xDevice = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] == 5
isLessThanSM80Device = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 8
PLATFORM_SPECIFIC_SDPA = get_platform_specific_sdpa()
MEM_EFF_CAPABILITY_MATCHES_SM80 = SM80OrLater or TEST_WITH_ROCM
class TestSDPA(NNTestCase):
from torch.nn.attention.bias import _calculate_scale
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_transformers.py
|
ref
|
def ref(x):
v1 = torch.matmul(x, x.transpose(-1, -2))
v2 = v1 / -0.0001
v3 = v2.softmax(dim=-1)
v4 = torch.matmul(v3, x)
return v4
x = torch.randn(1, 3, 64, 64, device=device)
ref_result = ref(x)
with sdpa_kernel(backends=[kernel]):
sdp_math = torch.nn.functional.scaled_dot_product_attention(x, x, x, scale=-1.0 / 0.0001)
self.assertEqual(ref_result, sdp_math)
|
import contextlib
from functools import partial
from collections import namedtuple
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import scaled_dot_product_attention
from torch.nn.attention import sdpa_kernel, SDPBackend
from torch.nn.attention.bias import CausalVariant, causal_lower_right, causal_upper_left
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
import itertools
import torch.optim as optim
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA, onlyCPU
from typing import List, Tuple, Optional, Dict
import torch.utils.cpp_extension
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
IS_FBCODE,
TEST_WITH_ROCM,
skipIfRocm,
skipIfTorchDynamo,
TEST_FAIRSEQ,
run_tests,
parametrize,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck,
make_tensor,
NOTEST_CPU,
IS_WINDOWS,
TEST_WITH_TORCHDYNAMO,
TEST_XPU,
)
from torch._dynamo.testing import CompileCounterWithBackend
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import (
IS_JETSON, SM80OrLater, PLATFORM_SUPPORTS_FLASH_ATTENTION,
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
PLATFORM_SUPPORTS_FUSED_ATTENTION,
PLATFORM_SUPPORTS_CUDNN_ATTENTION,
SM90OrLater,
tf32_on_and_off
)
from test_cpp_extensions_open_device_registration import (
remove_build_path,
generate_faked_module
)
import fairseq.models.transformer as fairseq_transformer
SdpaShape = namedtuple('Sdpa_Shape', ['batch', 'num_heads', 'seq_len', 'head_dim'])
Tolerances = namedtuple('Tolerances', ['atol', 'rtol'])
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM8XDevice = torch.cuda.is_available() and torch.cuda.get_device_capability() in [(8, 6), (8, 7), (8, 9)]
isSM90Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (9, 0)
isSM5xDevice = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] == 5
isLessThanSM80Device = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 8
PLATFORM_SPECIFIC_SDPA = get_platform_specific_sdpa()
MEM_EFF_CAPABILITY_MATCHES_SM80 = SM80OrLater or TEST_WITH_ROCM
from torch.nn.attention.bias import _calculate_scale
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_transformers.py
|
test_sdpa_with_inf
|
def test_sdpa_with_inf(self, device):
# https://github.com/pytorch/pytorch/issues/127055.
full = torch.full((600, 600), float("-inf"), device=device)
mask = torch.triu(full, diagonal=1) + torch.tril(full, diagonal=-10)
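    # Each row keeps only a narrow band of finite entries; everything else is -inf, exercising the
    # inf handling from the issue linked above.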
make_tensor = partial(rand_sdpa_tensor, type="dense", device=device, dtype=torch.float32, requires_grad=False)
input_shape = SdpaShape(1, 600, 2, 8)
q = make_tensor(input_shape)
k = make_tensor(input_shape)
v = make_tensor(input_shape)
with sdpa_kernel(backends=[SDPBackend.MATH]):
math_ref = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=mask)
with sdpa_kernel(backends=[SDPBackend.FLASH_ATTENTION]):
actual = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=mask)
self.assertEqual(math_ref, actual)
|
import contextlib
from functools import partial
from collections import namedtuple
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import scaled_dot_product_attention
from torch.nn.attention import sdpa_kernel, SDPBackend
from torch.nn.attention.bias import CausalVariant, causal_lower_right, causal_upper_left
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
import itertools
import torch.optim as optim
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA, onlyCPU
from typing import List, Tuple, Optional, Dict
import torch.utils.cpp_extension
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
IS_FBCODE,
TEST_WITH_ROCM,
skipIfRocm,
skipIfTorchDynamo,
TEST_FAIRSEQ,
run_tests,
parametrize,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck,
make_tensor,
NOTEST_CPU,
IS_WINDOWS,
TEST_WITH_TORCHDYNAMO,
TEST_XPU,
)
from torch._dynamo.testing import CompileCounterWithBackend
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import (
IS_JETSON, SM80OrLater, PLATFORM_SUPPORTS_FLASH_ATTENTION,
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
PLATFORM_SUPPORTS_FUSED_ATTENTION,
PLATFORM_SUPPORTS_CUDNN_ATTENTION,
SM90OrLater,
tf32_on_and_off
)
from test_cpp_extensions_open_device_registration import (
remove_build_path,
generate_faked_module
)
import fairseq.models.transformer as fairseq_transformer
SdpaShape = namedtuple('Sdpa_Shape', ['batch', 'num_heads', 'seq_len', 'head_dim'])
Tolerances = namedtuple('Tolerances', ['atol', 'rtol'])
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM8XDevice = torch.cuda.is_available() and torch.cuda.get_device_capability() in [(8, 6), (8, 7), (8, 9)]
isSM90Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (9, 0)
isSM5xDevice = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] == 5
isLessThanSM80Device = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 8
PLATFORM_SPECIFIC_SDPA = get_platform_specific_sdpa()
MEM_EFF_CAPABILITY_MATCHES_SM80 = SM80OrLater or TEST_WITH_ROCM
class TestSDPACpuOnly(NNTestCase):
from torch.nn.attention.bias import _calculate_scale
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_transformers.py
|
sdpa_helper
|
def sdpa_helper():
torch.manual_seed(777)
query = (
torch.empty(size=[2, 2, 49, 32], dtype=torch.float32, device=device)
.uniform_(-1, 1)
.requires_grad_(True)
)
key = (
torch.empty(size=[2, 2, 49, 32], dtype=torch.float32, device=device)
.uniform_(-1, 1)
.requires_grad_(True)
)
value = (
torch.empty(size=[2, 2, 49, 32], dtype=torch.float32, device=device)
.uniform_(-1, 1)
.requires_grad_(True)
)
res = torch.nn.functional.scaled_dot_product_attention(
query, key, value, None, 0.0, False
)
res_grad = (
torch.empty_like(res, device=device)
.uniform_(-1, 1)
)
res.backward(res_grad, retain_graph=True)
return res, query.grad, key.grad, value.grad
with sdpa_kernel(backends=[SDPBackend.MATH]):
res_ref, query_grad_ref, key_grad_ref, value_grad_ref = sdpa_helper()
with sdpa_kernel(backends=[SDPBackend.FLASH_ATTENTION]):
res_actual, query_grad_actual, key_grad_actual, value_grad_actual = sdpa_helper()
self.assertEqual(res_ref, res_actual)
self.assertEqual(query_grad_ref, query_grad_actual)
self.assertEqual(key_grad_ref, key_grad_actual)
self.assertEqual(value_grad_ref, value_grad_actual)
|
import contextlib
from functools import partial
from collections import namedtuple
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import scaled_dot_product_attention
from torch.nn.attention import sdpa_kernel, SDPBackend
from torch.nn.attention.bias import CausalVariant, causal_lower_right, causal_upper_left
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
import itertools
import torch.optim as optim
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA, onlyCPU
from typing import List, Tuple, Optional, Dict
import torch.utils.cpp_extension
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
IS_FBCODE,
TEST_WITH_ROCM,
skipIfRocm,
skipIfTorchDynamo,
TEST_FAIRSEQ,
run_tests,
parametrize,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck,
make_tensor,
NOTEST_CPU,
IS_WINDOWS,
TEST_WITH_TORCHDYNAMO,
TEST_XPU,
)
from torch._dynamo.testing import CompileCounterWithBackend
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import (
IS_JETSON, SM80OrLater, PLATFORM_SUPPORTS_FLASH_ATTENTION,
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
PLATFORM_SUPPORTS_FUSED_ATTENTION,
PLATFORM_SUPPORTS_CUDNN_ATTENTION,
SM90OrLater,
tf32_on_and_off
)
from test_cpp_extensions_open_device_registration import (
remove_build_path,
generate_faked_module
)
import fairseq.models.transformer as fairseq_transformer
SdpaShape = namedtuple('Sdpa_Shape', ['batch', 'num_heads', 'seq_len', 'head_dim'])
Tolerances = namedtuple('Tolerances', ['atol', 'rtol'])
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM8XDevice = torch.cuda.is_available() and torch.cuda.get_device_capability() in [(8, 6), (8, 7), (8, 9)]
isSM90Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (9, 0)
isSM5xDevice = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] == 5
isLessThanSM80Device = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 8
PLATFORM_SPECIFIC_SDPA = get_platform_specific_sdpa()
MEM_EFF_CAPABILITY_MATCHES_SM80 = SM80OrLater or TEST_WITH_ROCM
from torch.nn.attention.bias import _calculate_scale
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_transformers.py
|
attention_inputs
|
def attention_inputs(seq_len, head_dim, device, dtype, mask_every_n_rows=4):
query = torch.rand(1, 1, seq_len, head_dim, requires_grad=True, device=device, dtype=dtype)
key = torch.rand(1, 1, seq_len, head_dim, requires_grad=True, device=device, dtype=dtype)
value = torch.rand(1, 1, seq_len, head_dim, requires_grad=True, device=device, dtype=dtype)
# Create a mask with deterministic row masking
mask = torch.ones(1, 1, seq_len, seq_len, dtype=torch.bool, device=device)
# Mask every nth row
mask[0, 0, ::mask_every_n_rows, :] = False
# Create a fixed pattern for element-wise masking
element_mask = torch.zeros(seq_len, seq_len, dtype=torch.bool, device=device)
element_mask[torch.arange(seq_len)[:, None] % 5 == torch.arange(seq_len) % 5] = True
# Combine row masking and element-wise masking
mask = mask & element_mask.unsqueeze(0).unsqueeze(0)
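    # The row mask guarantees some rows are fully masked out, which callers use to check that fully
    # masked rows produce zero outputs and zero gradients.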
return query, key, value, mask
|
import contextlib
from functools import partial
from collections import namedtuple
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import scaled_dot_product_attention
from torch.nn.attention import sdpa_kernel, SDPBackend
from torch.nn.attention.bias import CausalVariant, causal_lower_right, causal_upper_left
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
import itertools
import torch.optim as optim
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA, onlyCPU
from typing import List, Tuple, Optional, Dict
import torch.utils.cpp_extension
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
IS_FBCODE,
TEST_WITH_ROCM,
skipIfRocm,
skipIfTorchDynamo,
TEST_FAIRSEQ,
run_tests,
parametrize,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck,
make_tensor,
NOTEST_CPU,
IS_WINDOWS,
TEST_WITH_TORCHDYNAMO,
TEST_XPU,
)
from torch._dynamo.testing import CompileCounterWithBackend
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import (
IS_JETSON, SM80OrLater, PLATFORM_SUPPORTS_FLASH_ATTENTION,
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
PLATFORM_SUPPORTS_FUSED_ATTENTION,
PLATFORM_SUPPORTS_CUDNN_ATTENTION,
SM90OrLater,
tf32_on_and_off
)
from test_cpp_extensions_open_device_registration import (
remove_build_path,
generate_faked_module
)
import fairseq.models.transformer as fairseq_transformer
SdpaShape = namedtuple('Sdpa_Shape', ['batch', 'num_heads', 'seq_len', 'head_dim'])
Tolerances = namedtuple('Tolerances', ['atol', 'rtol'])
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM8XDevice = torch.cuda.is_available() and torch.cuda.get_device_capability() in [(8, 6), (8, 7), (8, 9)]
isSM90Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (9, 0)
isSM5xDevice = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] == 5
isLessThanSM80Device = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 8
PLATFORM_SPECIFIC_SDPA = get_platform_specific_sdpa()
MEM_EFF_CAPABILITY_MATCHES_SM80 = SM80OrLater or TEST_WITH_ROCM
from torch.nn.attention.bias import _calculate_scale
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_transformers.py
|
compute_output_and_grads
|
def compute_output_and_grads(query, key, value, mask, backend):
with sdpa_kernel(backend):
masked_out = scaled_dot_product_attention(query, key, value, attn_mask=mask)
loss = masked_out.sum()
grads = torch.autograd.grad(loss, [query, key, value])
return masked_out, grads
if backend == SDPBackend.FLASH_ATTENTION and "cuda" in str(device):
unittest.skip("FlashAttention does not support masks on cuda")
return
if backend == SDPBackend.EFFICIENT_ATTENTION and "cpu" in str(device):
unittest.skip("EfficientAttention does not support masks on cpu")
return
query, key, value, mask = attention_inputs(seq_len, head_dim, device, dtype)
# Compute results for the tested backend
backend_out, backend_grads = compute_output_and_grads(query, key, value, mask, backend)
# Compute results for the Math backend
math_out, math_grads = compute_output_and_grads(query, key, value, mask, SDPBackend.MATH)
# Compare outputs
torch.testing.assert_close(backend_out, math_out, atol=5e-3, rtol=0)
self.assertFalse(backend_out.isnan().any())
self.assertFalse(math_out.isnan().any())
# Compare gradients
for bg, mg in zip(backend_grads, math_grads):
torch.testing.assert_close(bg, mg, atol=3e-3, rtol=0)
self.assertFalse(bg.isnan().any())
self.assertFalse(mg.isnan().any())
# Check if masked rows are zero in output
mask_sum = mask.sum(dim=-1, keepdim=True)
masked_rows = (mask_sum == 0).expand_as(backend_out)
self.assertTrue((mask_sum == 0).sum() > 0, "No fully masked out rows found")
assert torch.all(backend_out[masked_rows] == 0), \
f"Non-zero values in fully masked rows for {backend=}"
# Check if gradients for masked rows are zero
grad_query = backend_grads[0]
assert torch.all(grad_query[masked_rows] == 0), f"Non-zero gradients in fully masked rows for {backend=}"
|
import contextlib
from functools import partial
from collections import namedtuple
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import scaled_dot_product_attention
from torch.nn.attention import sdpa_kernel, SDPBackend
from torch.nn.attention.bias import CausalVariant, causal_lower_right, causal_upper_left
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
import itertools
import torch.optim as optim
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA, onlyCPU
from typing import List, Tuple, Optional, Dict
import torch.utils.cpp_extension
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
IS_FBCODE,
TEST_WITH_ROCM,
skipIfRocm,
skipIfTorchDynamo,
TEST_FAIRSEQ,
run_tests,
parametrize,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck,
make_tensor,
NOTEST_CPU,
IS_WINDOWS,
TEST_WITH_TORCHDYNAMO,
TEST_XPU,
)
from torch._dynamo.testing import CompileCounterWithBackend
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import (
IS_JETSON, SM80OrLater, PLATFORM_SUPPORTS_FLASH_ATTENTION,
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
PLATFORM_SUPPORTS_FUSED_ATTENTION,
PLATFORM_SUPPORTS_CUDNN_ATTENTION,
SM90OrLater,
tf32_on_and_off
)
from test_cpp_extensions_open_device_registration import (
remove_build_path,
generate_faked_module
)
import fairseq.models.transformer as fairseq_transformer
SdpaShape = namedtuple('Sdpa_Shape', ['batch', 'num_heads', 'seq_len', 'head_dim'])
Tolerances = namedtuple('Tolerances', ['atol', 'rtol'])
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM8XDevice = torch.cuda.is_available() and torch.cuda.get_device_capability() in [(8, 6), (8, 7), (8, 9)]
isSM90Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (9, 0)
isSM5xDevice = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] == 5
isLessThanSM80Device = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 8
PLATFORM_SPECIFIC_SDPA = get_platform_specific_sdpa()
MEM_EFF_CAPABILITY_MATCHES_SM80 = SM80OrLater or TEST_WITH_ROCM
from torch.nn.attention.bias import _calculate_scale
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_torch.py
|
test_storage_dead_weak_ref
|
def test_storage_dead_weak_ref(self):
x = torch.UntypedStorage(2)
w_x = weakref.ref(x)
y = torch.tensor(x)
del x
x = w_x()
# Ideally, x would keep the storage live. But CPython doesn't
# provide enough hooks to do this. So it will go dead and x
# will transmute into storage with null StorageImpl. Not great, but the
# best we can do.
del y
self.assertRaisesRegex(RuntimeError, "Got a null Storage", lambda: x[0])
self.assertRaisesRegex(RuntimeError, "Got a null Storage", lambda: x.float())
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
class TestTorch(TestCase):
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_torch.py
|
test_tensor_resurrected_weak_ref
|
x = torch.empty(2)
w_x = weakref.ref(x)
y = torch.empty(2)
y.grad = x
del x
x = w_x()
# Use this to manually fix weak references after dereferencing them
x._fix_weakref()
del y
x.sigmoid()
|
def test_tensor_resurrected_weak_ref(self):
x = torch.empty(2)
w_x = weakref.ref(x)
y = torch.empty(2)
y.grad = x
del x
x = w_x()
# Use this to manually fix weak references after dereferencing them
x._fix_weakref()
del y
x.sigmoid()
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
class TestTorch(TestCase):
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/test_transformers.py
|
test_flash_attention_fail_with_non_square_causal_attention
|
def test_flash_attention_fail_with_non_square_causal_attention(self, device):
dtype = torch.bfloat16
q_shape = SdpaShape(1, 1, 8, 16)
kv_shape = SdpaShape(1, 1, 12, 16)
make_q = partial(torch.rand, q_shape, device=device, dtype=dtype)
make_kv = partial(torch.rand, kv_shape, device=device, dtype=dtype)
q, k, v = make_q(), make_kv(), make_kv()
warning_str = "Flash attention does not support the is_causal flag when seqlen_q != seqlen_k."
with sdpa_kernel(backends=[SDPBackend.FLASH_ATTENTION]):
with self.assertWarnsRegex(UserWarning, warning_str):
self.assertRaises(RuntimeError, lambda: torch.nn.functional.scaled_dot_product_attention(
q, k, v, None, 0.0, is_causal=True))
|
import contextlib
from functools import partial
from collections import namedtuple
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import scaled_dot_product_attention
from torch.nn.attention import sdpa_kernel, SDPBackend
from torch.nn.attention.bias import CausalVariant, causal_lower_right, causal_upper_left
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
import itertools
import torch.optim as optim
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA, onlyCPU
from typing import List, Tuple, Optional, Dict
import torch.utils.cpp_extension
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
IS_FBCODE,
TEST_WITH_ROCM,
skipIfRocm,
skipIfTorchDynamo,
TEST_FAIRSEQ,
run_tests,
parametrize,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck,
make_tensor,
NOTEST_CPU,
IS_WINDOWS,
TEST_WITH_TORCHDYNAMO,
TEST_XPU,
)
from torch._dynamo.testing import CompileCounterWithBackend
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import (
IS_JETSON, SM80OrLater, PLATFORM_SUPPORTS_FLASH_ATTENTION,
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
PLATFORM_SUPPORTS_FUSED_ATTENTION,
PLATFORM_SUPPORTS_CUDNN_ATTENTION,
SM90OrLater,
tf32_on_and_off
)
from test_cpp_extensions_open_device_registration import (
remove_build_path,
generate_faked_module
)
import fairseq.models.transformer as fairseq_transformer
SdpaShape = namedtuple('Sdpa_Shape', ['batch', 'num_heads', 'seq_len', 'head_dim'])
Tolerances = namedtuple('Tolerances', ['atol', 'rtol'])
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM8XDevice = torch.cuda.is_available() and torch.cuda.get_device_capability() in [(8, 6), (8, 7), (8, 9)]
isSM90Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (9, 0)
isSM5xDevice = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] == 5
isLessThanSM80Device = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 8
PLATFORM_SPECIFIC_SDPA = get_platform_specific_sdpa()
MEM_EFF_CAPABILITY_MATCHES_SM80 = SM80OrLater or TEST_WITH_ROCM
class TestSDPAFailureModes(NNTestCase):
from torch.nn.attention.bias import _calculate_scale
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
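For contrast with the failure case in the record above, a minimal sketch of the shape that flash attention does permit: with is_causal=True the kernel requires seqlen_q == seqlen_k, so a square 8-by-8 query/key pair is accepted where the 8-vs-12 pair above is rejected. The snippet does not pin a backend, so it also runs on CPU via the math path; the shapes are illustrative.

import torch
import torch.nn.functional as F

q = torch.rand(1, 1, 8, 16)   # (batch, heads, seqlen_q, head_dim)
k = torch.rand(1, 1, 8, 16)   # seqlen_k == seqlen_q, so is_causal is permitted
v = torch.rand(1, 1, 8, 16)
out = F.scaled_dot_product_attention(q, k, v, is_causal=True)
print(out.shape)  # torch.Size([1, 1, 8, 16])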
torch
|
test/test_transformers.py
|
_get_block_size_n
|
def _get_block_size_n(device, head_dim, is_dropout, is_causal):
# This should match the block sizes in the CUDA kernel
assert head_dim <= 256
major, minor = torch.cuda.get_device_capability(device)
is_sm8x = major == 8 and minor > 0 # Only include sm86 and sm89, exclude sm80 (A100)
is_sm80 = major == 8 and minor == 0
is_sm90 = major == 9 and minor == 0
if head_dim <= 32:
return 128
if head_dim <= 64:
return 128 if not is_dropout else 64
elif head_dim <= 96:
return 64
elif head_dim <= 128:
if is_sm8x:
return 64 if (not is_dropout and is_causal) else 32
else:
return 64 if not is_dropout else 32
elif head_dim <= 160:
if is_sm8x:
return 64
else:
return 32
elif head_dim <= 192:
return 64
elif head_dim <= 224:
return 64
elif head_dim <= 256:
return 64
|
import contextlib
from functools import partial
from collections import namedtuple
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import scaled_dot_product_attention
from torch.nn.attention import sdpa_kernel, SDPBackend
from torch.nn.attention.bias import CausalVariant, causal_lower_right, causal_upper_left
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
import itertools
import torch.optim as optim
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA, onlyCPU
from typing import List, Tuple, Optional, Dict
import torch.utils.cpp_extension
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
IS_FBCODE,
TEST_WITH_ROCM,
skipIfRocm,
skipIfTorchDynamo,
TEST_FAIRSEQ,
run_tests,
parametrize,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck,
make_tensor,
NOTEST_CPU,
IS_WINDOWS,
TEST_WITH_TORCHDYNAMO,
TEST_XPU,
)
from torch._dynamo.testing import CompileCounterWithBackend
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import (
IS_JETSON, SM80OrLater, PLATFORM_SUPPORTS_FLASH_ATTENTION,
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
PLATFORM_SUPPORTS_FUSED_ATTENTION,
PLATFORM_SUPPORTS_CUDNN_ATTENTION,
SM90OrLater,
tf32_on_and_off
)
from test_cpp_extensions_open_device_registration import (
remove_build_path,
generate_faked_module
)
import fairseq.models.transformer as fairseq_transformer
SdpaShape = namedtuple('Sdpa_Shape', ['batch', 'num_heads', 'seq_len', 'head_dim'])
Tolerances = namedtuple('Tolerances', ['atol', 'rtol'])
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM8XDevice = torch.cuda.is_available() and torch.cuda.get_device_capability() in [(8, 6), (8, 7), (8, 9)]
isSM90Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (9, 0)
isSM5xDevice = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] == 5
isLessThanSM80Device = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 8
PLATFORM_SPECIFIC_SDPA = get_platform_specific_sdpa()
MEM_EFF_CAPABILITY_MATCHES_SM80 = SM80OrLater or TEST_WITH_ROCM
from torch.nn.attention.bias import _calculate_scale
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
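A minimal usage sketch for the helper in the record above. It assumes _get_block_size_n is in scope and that a CUDA device is present, since the device capability drives the branches; the specific value quoted applies to an sm80 (A100) part.

import torch

if torch.cuda.is_available():
    # On an sm80 device, head_dim=128 without dropout falls into the non-sm8x
    # branch and resolves to a block size of 64; sm86/sm89 devices take the
    # is_sm8x branch instead.
    block_n = _get_block_size_n("cuda", head_dim=128, is_dropout=False, is_causal=False)
    print(block_n)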
torch
|
test/test_torch.py
|
test_data_ptr_of_empty_view_with_storage
|
def test_data_ptr_of_empty_view_with_storage(self):
t = torch.empty((2, 2))
self.assertNotEqual(t.data_ptr(), 0)
t2 = t[0:0].view(0, 1)
self.assertEqual(t2.data_ptr(), 0)
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
class TestTorch(TestCase):
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_torch.py
|
_get_tensor_prop
|
def _get_tensor_prop(self, t):
preserved = (
id(t),
# Refcount values get modified by Dynamo resume frames
0 if TEST_WITH_TORCHDYNAMO else sys.getrefcount(t),
)
slotnames = copyreg._slotnames(t.__class__)
moved = (
slotnames,
id(t.__dict__),
tuple(t.__dict__.keys()),
[getattr(t, name, None) for name in slotnames]
)
return preserved, moved
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
class TestTorch(TestCase):
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
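A minimal sketch of the two groups the helper in the record above captures, assuming no Dynamo tracing: the object's identity and refcount stay attached to the Python wrapper ("preserved"), while the __dict__ contents and slot values are the per-object payload the test tracks separately ("moved"). The my_tag attribute is illustrative.

import sys
import copyreg
import torch

t = torch.empty(2)
t.my_tag = "example"                       # instance attribute, lands in t.__dict__
preserved = (id(t), sys.getrefcount(t))    # identity / refcount of the wrapper
slotnames = copyreg._slotnames(type(t))
moved = (slotnames, id(t.__dict__), tuple(t.__dict__.keys()))
print(preserved)
print(moved)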
torch
|
test/test_torch.py
|
test_bf16_supported_on_cpu
|
# The following block extends TestTorch with negative dim wrapping tests
# FIXME: replace these with OpInfo sample inputs or systemic OpInfo tests
# Functions to test negative dimension wrapping
METHOD = 1
INPLACE_METHOD = 2
FUNCTIONAL = 4
DIM_ARG = None
|
def test_bf16_supported_on_cpu(self):
self.assertFalse(torch.cuda.is_bf16_supported())
# The following block extends TestTorch with negative dim wrapping tests
# FIXME: replace these with OpInfo sample inputs or systemic OpInfo tests
# Functions to test negative dimension wrapping
|
import torch
import torch.utils.data
import numpy as np
import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
import copyreg
from torch import inf, nan
from itertools import product, combinations, permutations, chain
from functools import partial
from torch import multiprocessing as mp
from torch.testing import make_tensor
from torch.testing._internal.common_optimizers import (
optim_db, optims, _get_optim_inputs_including_global_cliquey_kwargs)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
TEST_WITH_TORCHINDUCTOR, TEST_WITH_ROCM, run_tests, IS_JETSON,
IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, skipIfTorchInductor, load_tests, slowTest, slowTestIf,
TEST_WITH_CROSSREF, skipIfTorchDynamo, skipRocmIfTorchInductor, set_default_dtype,
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
expectedFailureXLA,
instantiate_device_type_tests,
onlyCUDA, onlyCPU,
dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes,
get_all_device_types, skipXLA)
from typing import Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import (
tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN, TEST_MULTIGPU,
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import TestCase # type: ignore[assignment]
load_tests = load_tests
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(0) == (8, 6)
from copy import deepcopy
from copy import deepcopy
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from scipy import stats
from copy import copy
from functools import partial
class TestTorch(TestCase):
from copy import deepcopy
import pickle
import pickle
import pickle
from torch._torch_docs import __file__ as doc_file
from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args
from copy import deepcopy
from torch.library import Library, impl
import weakref
import weakref
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
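As a small usage note for the test in the record above: torch.cuda.is_bf16_supported() reports whether the CUDA stack supports bfloat16, so on a CPU-only build the test expects it to return False; creating bfloat16 tensors on the CPU is independent of that check.

import torch

print(torch.cuda.is_bf16_supported())      # False on a CPU-only build, per the test above
x = torch.ones(4, dtype=torch.bfloat16)    # CPU bfloat16 tensors work regardless
print(x.dtype)                             # torch.bfloat16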
torch
|
test/test_transformers.py
|
use_deterministic_algorithims
|
def use_deterministic_algorithims(mode: bool, warn_only: bool):
r"""
This context manager can be used to temporarily enable or disable deterministic algorithms.
Upon exiting the context manager, the previous state of the flag will be restored.
"""
previous_mode: bool = torch.are_deterministic_algorithms_enabled()
previous_warn_only: bool = torch.is_deterministic_algorithms_warn_only_enabled()
try:
torch.use_deterministic_algorithms(mode, warn_only=warn_only)
yield{}
except RuntimeError as err:
raise err
finally:
torch.use_deterministic_algorithms(previous_mode, warn_only=previous_warn_only)
# Found in torch/testing/_comparison.py
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM86Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (8, 6)
|
def use_deterministic_algorithims(mode: bool, warn_only: bool):
r"""
This context manager can be used to temporarily enable or disable deterministic algorithms.
Upon exiting the context manager, the previous state of the flag will be restored.
"""
previous_mode: bool = torch.are_deterministic_algorithms_enabled()
previous_warn_only: bool = torch.is_deterministic_algorithms_warn_only_enabled()
try:
torch.use_deterministic_algorithms(mode, warn_only=warn_only)
yield {}
finally:
torch.use_deterministic_algorithms(previous_mode, warn_only=previous_warn_only)
# Found in torch/testing/_comparison.py
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM8XDevice = torch.cuda.is_available() and torch.cuda.get_device_capability() in [(8, 6), (8, 7), (8, 9)]
isSM90Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (9, 0)
isSM5xDevice = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] == 5
isLessThanSM80Device = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 8
|
import contextlib
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
from torch.backends.cuda import sdp_kernel, SDPBackend
import torch.optim as optim
from torch.testing._internal.common_dtype import floating_types_and_half
from typing import Tuple
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
TEST_FAIRSEQ,
run_tests,
parametrize,
instantiate_parametrized_tests,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck
)
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import TEST_CUDA, SM80OrLater, PLATFORM_SUPPORTS_FUSED_SDPA
import fairseq.models.transformer as fairseq_transformer
|
import contextlib
from functools import partial
from collections import namedtuple
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import scaled_dot_product_attention
from torch.nn.attention import sdpa_kernel, SDPBackend
from torch.nn.attention.bias import CausalVariant, causal_lower_right, causal_upper_left
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
import itertools
import torch.optim as optim
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA, onlyCPU
from typing import List, Tuple, Optional, Dict
import torch.utils.cpp_extension
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
IS_FBCODE,
TEST_WITH_ROCM,
skipIfRocm,
skipIfTorchDynamo,
TEST_FAIRSEQ,
run_tests,
parametrize,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck,
make_tensor,
NOTEST_CPU,
IS_WINDOWS,
TEST_WITH_TORCHDYNAMO,
TEST_XPU,
)
from torch._dynamo.testing import CompileCounterWithBackend
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import (
IS_JETSON, SM80OrLater, PLATFORM_SUPPORTS_FLASH_ATTENTION,
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
PLATFORM_SUPPORTS_FUSED_ATTENTION,
PLATFORM_SUPPORTS_CUDNN_ATTENTION,
SM90OrLater,
tf32_on_and_off
)
from test_cpp_extensions_open_device_registration import (
remove_build_path,
generate_faked_module
)
import fairseq.models.transformer as fairseq_transformer
SdpaShape = namedtuple('Sdpa_Shape', ['batch', 'num_heads', 'seq_len', 'head_dim'])
Tolerances = namedtuple('Tolerances', ['atol', 'rtol'])
from torch.nn.attention.bias import _calculate_scale
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
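A generator like the one in the record above is typically wrapped with contextlib.contextmanager so it can drive a with-statement; below is a minimal self-contained sketch of the same save/set/restore pattern. The decorator and the deterministic_guard name are illustrative, not part of the test file.

import contextlib
import torch

@contextlib.contextmanager
def deterministic_guard(mode: bool, warn_only: bool = False):
    # Save the current flags, apply the requested ones, and restore on exit.
    prev_mode = torch.are_deterministic_algorithms_enabled()
    prev_warn_only = torch.is_deterministic_algorithms_warn_only_enabled()
    try:
        torch.use_deterministic_algorithms(mode, warn_only=warn_only)
        yield
    finally:
        torch.use_deterministic_algorithms(prev_mode, warn_only=prev_warn_only)

with deterministic_guard(True, warn_only=True):
    # Ops without a deterministic implementation warn instead of raising here.
    torch.randn(4, 4).cumsum(0)
print(torch.are_deterministic_algorithms_enabled())  # restored to the previous value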
torch
|
test/test_transformers.py
|
query_key_value_clones
|
def query_key_value_clones(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, dtype: torch.dtype = None):
""" Clones the query, key, and value tensors and moves them to the specified dtype. """
if dtype is None:
dtype = query.dtype
query_ref = query.clone().detach().to(dtype).requires_grad_(query.requires_grad)
key_ref = key.clone().detach().to(dtype).requires_grad_(key.requires_grad)
value_ref = value.clone().detach().to(dtype).requires_grad_(value.requires_grad)
return query_ref, key_ref, value_ref
|
import contextlib
from functools import partial
from collections import namedtuple
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import scaled_dot_product_attention
from torch.nn.attention import sdpa_kernel, SDPBackend
from torch.nn.attention.bias import CausalVariant, causal_lower_right, causal_upper_left
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
import itertools
import torch.optim as optim
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA, onlyCPU
from typing import List, Tuple, Optional, Dict
import torch.utils.cpp_extension
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
IS_FBCODE,
TEST_WITH_ROCM,
skipIfRocm,
skipIfTorchDynamo,
TEST_FAIRSEQ,
run_tests,
parametrize,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck,
make_tensor,
NOTEST_CPU,
IS_WINDOWS,
TEST_WITH_TORCHDYNAMO,
TEST_XPU,
)
from torch._dynamo.testing import CompileCounterWithBackend
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import (
IS_JETSON, SM80OrLater, PLATFORM_SUPPORTS_FLASH_ATTENTION,
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
PLATFORM_SUPPORTS_FUSED_ATTENTION,
PLATFORM_SUPPORTS_CUDNN_ATTENTION,
SM90OrLater,
tf32_on_and_off
)
from test_cpp_extensions_open_device_registration import (
remove_build_path,
generate_faked_module
)
import fairseq.models.transformer as fairseq_transformer
SdpaShape = namedtuple('Sdpa_Shape', ['batch', 'num_heads', 'seq_len', 'head_dim'])
Tolerances = namedtuple('Tolerances', ['atol', 'rtol'])
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM8XDevice = torch.cuda.is_available() and torch.cuda.get_device_capability() in [(8, 6), (8, 7), (8, 9)]
isSM90Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (9, 0)
isSM5xDevice = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] == 5
isLessThanSM80Device = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 8
from torch.nn.attention.bias import _calculate_scale
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
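A minimal sketch of how dtype-cast reference clones like the ones produced in the record above are typically used: run the same attention call in a lower precision and in float64, then compare the results within the listed tolerances. The clone_qkv name, shapes, and tolerances here are illustrative.

import torch
import torch.nn.functional as F

def clone_qkv(query, key, value, dtype=None):
    # Same idea as query_key_value_clones above: detached, dtype-cast clones
    # that keep requires_grad so gradients can also be compared.
    dtype = dtype or query.dtype
    return tuple(
        t.clone().detach().to(dtype).requires_grad_(t.requires_grad)
        for t in (query, key, value)
    )

q = torch.rand(1, 2, 8, 16, requires_grad=True)
k = torch.rand(1, 2, 8, 16, requires_grad=True)
v = torch.rand(1, 2, 8, 16, requires_grad=True)
q_ref, k_ref, v_ref = clone_qkv(q, k, v, dtype=torch.float64)

out = F.scaled_dot_product_attention(q, k, v)
out_ref = F.scaled_dot_product_attention(q_ref, k_ref, v_ref)
torch.testing.assert_close(out.double(), out_ref, atol=1e-5, rtol=1.3e-6)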
torch
|
test/test_transformers.py
|
get_platform_specific_sdpa
|
def get_platform_specific_sdpa():
ret = []
if PLATFORM_SUPPORTS_FLASH_ATTENTION:
ret.append(SDPBackend.FLASH_ATTENTION)
if PLATFORM_SUPPORTS_MEM_EFF_ATTENTION:
ret.append(SDPBackend.EFFICIENT_ATTENTION)
if PLATFORM_SUPPORTS_CUDNN_ATTENTION:
ret.append(SDPBackend.CUDNN_ATTENTION)
if not ret:
# Add a placeholder, an empty list causes "An empty arg_values was passed to @parametrize"
ret.append(SDPBackend.EFFICIENT_ATTENTION)
return ret
PLATFORM_SPECIFIC_SDPA = get_platform_specific_sdpa()
# Indicate the Efficient attention backend can support:
# 1. sequence longer than 512
# 2. head dimension larger than 64
MEM_EFF_CAPABILITY_MATCHES_SM80 = SM80OrLater or TEST_WITH_ROCM
|
import contextlib
from functools import partial
from collections import namedtuple
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import scaled_dot_product_attention
from torch.nn.attention import sdpa_kernel, SDPBackend
from torch.nn.attention.bias import CausalVariant, causal_lower_right, causal_upper_left
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
import itertools
import torch.optim as optim
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA, onlyCPU
from typing import List, Tuple, Optional, Dict
import torch.utils.cpp_extension
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
IS_FBCODE,
TEST_WITH_ROCM,
skipIfRocm,
skipIfTorchDynamo,
TEST_FAIRSEQ,
run_tests,
parametrize,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck,
make_tensor,
NOTEST_CPU,
IS_WINDOWS,
TEST_WITH_TORCHDYNAMO,
TEST_XPU,
)
from torch._dynamo.testing import CompileCounterWithBackend
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import (
IS_JETSON, SM80OrLater, PLATFORM_SUPPORTS_FLASH_ATTENTION,
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
PLATFORM_SUPPORTS_FUSED_ATTENTION,
PLATFORM_SUPPORTS_CUDNN_ATTENTION,
SM90OrLater,
tf32_on_and_off
)
from test_cpp_extensions_open_device_registration import (
remove_build_path,
generate_faked_module
)
import fairseq.models.transformer as fairseq_transformer
SdpaShape = namedtuple('Sdpa_Shape', ['batch', 'num_heads', 'seq_len', 'head_dim'])
Tolerances = namedtuple('Tolerances', ['atol', 'rtol'])
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
isSM8XDevice = torch.cuda.is_available() and torch.cuda.get_device_capability() in [(8, 6), (8, 7), (8, 9)]
isSM90Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (9, 0)
isSM5xDevice = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] == 5
isLessThanSM80Device = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 8
from torch.nn.attention.bias import _calculate_scale
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
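A minimal sketch of how a backend list such as PLATFORM_SPECIFIC_SDPA is consumed in these tests: restrict dispatch to one backend at a time with sdpa_kernel and run the same attention call under each. SDPBackend.MATH is used as a stand-in here so the snippet also runs without a supported GPU.

import torch
import torch.nn.functional as F
from torch.nn.attention import sdpa_kernel, SDPBackend

backends = [SDPBackend.MATH]  # stand-in for the platform-derived list above
q = torch.rand(1, 2, 8, 16)
k = torch.rand(1, 2, 8, 16)
v = torch.rand(1, 2, 8, 16)
for backend in backends:
    with sdpa_kernel(backends=[backend]):
        out = F.scaled_dot_product_attention(q, k, v)
print(out.shape)  # torch.Size([1, 2, 8, 16])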