Dataset schema (column, dtype, value range):

  library         stringclasses   1 value
  test_file       stringclasses   785 values
  test_function   stringlengths   1 to 295
  before          stringlengths   0 to 448k
  after           stringlengths   0 to 487k
  context_before  stringclasses   947 values
  context_after   stringlengths   0 to 16.3k
  commit_before   stringclasses   1 value
  commit_after    stringclasses   1 value
  change_type     stringclasses   3 values
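All rows in this excerpt are drawn from PyTorch's test/test_python_dispatch.py. As a quick orientation, here is a minimal sketch of how a dataset with this schema could be loaded and inspected with the Hugging Face datasets library; the dataset identifier below is a placeholder, not the real repository name:

    from datasets import load_dataset

    # Placeholder dataset id -- substitute the actual repository name.
    ds = load_dataset("org/test-migration-pairs", split="train")

    # Each row pairs a test function's source before and after a commit,
    # plus the surrounding import context and a change_type label.
    row = ds[0]
    print(row["test_file"], row["test_function"], row["change_type"])
    print(row["before"][:200])
    print(row["after"][:200])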
torch
test/test_python_dispatch.py
my_neg
def my_neg(*args, **kwargs):
    return args[0]._neg_view()

# Now we are secretly making the operator a view op so autograd needs to know how
# to handle it
my_lib1.impl('neg', my_neg, "AutogradCPU")

self.assertTrue(torch.neg(x).is_neg())

# RuntimeError: impl("aten::neg", ...):
# Explicitly provided namespace (aten) in operator name does not match ...
with self.assertRaisesRegex(RuntimeError, "operator name does not match namespace"):
    my_lib3 = Library("foo", "DEF")
    my_lib3.define("neg(Tensor self) -> Tensor")
    my_lib3.impl(torch.ops.aten.neg.default, my_neg, "AutogradCPU")
del my_lib3

# Example 2
def my_neg(*args, **kwargs):
    return args[0]._neg_view()

# Now we are secretly making the operator a view op so autograd needs to know how
# to handle it
my_lib1.impl("neg", my_neg, "AutogradCPU")

self.assertTrue(torch.neg(x).is_neg())

# RuntimeError: impl("aten::neg", ...):
# Explicitly provided namespace (aten) in operator name does not match ...
with self.assertRaisesRegex(
    RuntimeError, "operator name does not match namespace"
):
    with _scoped_library("foo", "DEF") as my_lib3:
        my_lib3.define("neg(Tensor self) -> Tensor")
        my_lib3.impl(torch.ops.aten.neg.default, my_neg, "AutogradCPU")

# Example 2
import tempfile
import torch
from copy import deepcopy
from torch.library import Library
from torch.cuda.jiterator import _create_jit_fn
import unittest
from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_ROCM, IS_WINDOWS
from torch.utils._mode_utils import no_dispatch, all_same_mode
from torch.testing._internal.logging_tensor import LoggingTensor, LoggingTensorReentrant, LoggingTensorMode, \
    log_input, capture_logs, capture_logs_with_logging_tensor_mode
from torch.utils._pytree import tree_map, tree_map_only
from torch.utils._python_dispatch import TorchDispatchMode, _get_current_dispatch_mode, _get_current_dispatch_mode_stack
import logging
import logging
import sys
import tempfile
import unittest
from copy import deepcopy

import torch
import torch._dynamo
from torch import SymInt
from torch._C import DispatchKey, DispatchKeySet
from torch._custom_op.functional import register_functional_op
from torch._subclasses.fake_tensor import FakeTensorMode
from torch.cuda.jiterator import _create_jit_fn
from torch.fx.experimental.proxy_tensor import make_fx
from torch.fx.experimental.symbolic_shapes import ShapeEnv
from torch.library import _scoped_library, fallthrough_kernel, impl, Library
from torch.multiprocessing.reductions import StorageWeakRef
from torch.testing._internal.common_device_type import (
    instantiate_device_type_tests,
    ops,
)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_utils import (
    first_sample,
    IS_WINDOWS,
    run_tests,
    TEST_WITH_ROCM,
    TestCase,
)
from torch.testing._internal.custom_op_db import custom_op_db
from torch.testing._internal.logging_tensor import (
    capture_logs,
    capture_logs_with_logging_tensor_mode,
    log_input,
    LoggingTensor,
    LoggingTensorMode,
    LoggingTensorReentrant,
)
from torch.testing._internal.two_tensor import TwoTensor
from torch.utils import _pytree as pytree
from torch.utils._mode_utils import all_same_mode, no_dispatch
from torch.utils._python_dispatch import (
    _get_current_dispatch_mode,
    _get_current_dispatch_mode_stack,
    is_in_torch_dispatch_mode,
    TorchDispatchMode,
)
from torch.utils._pytree import tree_map, tree_map_only

from torch._dynamo.source import ConstantSource
from torch.fx.experimental.symbolic_shapes import (
    DimDynamic,
    ShapeEnv,
)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
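The before/after pair above shows the recurring pattern in these rows: kernels registered through a manually constructed torch.library.Library and cleaned up with del are rewritten to use the _scoped_library context manager, which deregisters its kernels on exit. A minimal standalone sketch of that pattern, using the aten::sum override that several later rows also show:

    import torch
    from torch.library import _scoped_library

    def my_sum(*args, **kwargs):
        # Override that ignores the reduction and returns a copy of the input
        return args[0].clone()

    with _scoped_library("aten", "IMPL") as my_lib:
        my_lib.impl("aten::sum", my_sum, "CPU")
        x = torch.tensor([1, 2])
        # The override is active inside the scope
        assert torch.equal(torch.sum(x), x)

    # On exit the kernel is deregistered and the original behavior returns
    assert torch.sum(x).item() == 3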
torch
test/test_python_dispatch.py
tearDown
def tearDown(self):
    if hasattr(torch.ops, self.test_ns):
        del torch.ops._test_python_registration
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_python_dispatch.py
my_fallback
def my_fallback(op, *args, **kwargs):
    # Disable our handler during checks and generating the output
    with torch._C._ForceDispatchKeyGuard(
        include_to_set, exclude_to_set | test_keyset
    ):
        self.assertIs(op, expected_op)
        self.assertEqual(args, expected_args)
        self.assertEqual(kwargs, expected_kwargs)
        # Return something specific
        return torch.empty(out_shape)

my_lib.fallback(my_fallback, test_key)

a, b = torch.rand(2), torch.rand(2)
with torch._C._ForceDispatchKeyGuard(include_to_set, exclude_to_set):
    # Check a factory function
    expected_op = torch.ops.aten.empty.memory_format
    expected_args = ((2, 2),)
    # Extra kwargs to bypass issues with default args in factory functions
    expected_kwargs = {
        "dtype": torch.float64,
        "pin_memory": False,
        "device": torch.device("cpu"),
    }
    out_shape = (3,)
    out = torch.empty(*expected_args, **expected_kwargs)
    self.assertEqual(out.size(), out_shape)

    # Check a regular function
    expected_op = torch.ops.aten.add.Tensor
    expected_args = (a, b)
    expected_kwargs = {}
    out_shape = (4,)
    out = a + b
    self.assertEqual(out.size(), out_shape)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_python_dispatch.py
fast_gelu
def fast_gelu(*args, **kwargs):
    CALLED[0] = True
    return jitted_gelu(*args, **kwargs)

# overriding gelu's cuda kernel with Jiterator generated relu kernel
my_lib = Library("aten", "IMPL")
my_lib.impl('aten::gelu', fast_gelu, "CUDA")

x = torch.rand([3, 3], device='cuda', dtype=torch.float)
self.assertEqual(torch.nn.functional.gelu(x), torch.nn.functional.relu(x))
self.assertTrue(CALLED[0])
del my_lib

# behavior restored after deregistration
self.assertNotEqual(torch.nn.functional.gelu(x), torch.nn.functional.relu(x))
def fast_gelu(*args, **kwargs):
    CALLED[0] = True
    return jitted_gelu(*args, **kwargs)

# overriding gelu's cuda kernel with Jiterator generated relu kernel
with _scoped_library("aten", "IMPL") as my_lib:
    my_lib.impl("aten::gelu", fast_gelu, "CUDA")

    x = torch.rand([3, 3], device="cuda", dtype=torch.float)
    self.assertEqual(
        torch.nn.functional.gelu(x), torch.nn.functional.relu(x)
    )
    self.assertTrue(CALLED[0])

# behavior restored after deregistration
self.assertNotEqual(
    torch.nn.functional.gelu(x), torch.nn.functional.relu(x)
)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_python_dispatch.py
clipped_exp
def clipped_exp(*args, **kwargs):
    CALLED[0] = True
    return jitted_exp(*args, **kwargs)

# overriding exp's cuda kernel with clipped_exp kernel
my_lib = Library("aten", "IMPL")
my_lib.impl('aten::exp', clipped_exp, "CUDA")

x = torch.tensor([0.0, 100.0], device='cuda', dtype=torch.float16)
self.assertEqual(torch.exp(x), torch.tensor([1.0, 22026.4657948], dtype=torch.float16))
self.assertTrue(CALLED[0])
del my_lib

# behavior restored after deregistration
self.assertEqual(torch.exp(x), torch.tensor([1.0, torch.inf], dtype=torch.float16))
def clipped_exp(*args, **kwargs):
    CALLED[0] = True
    return jitted_exp(*args, **kwargs)

# overriding exp's cuda kernel with clipped_exp kernel
with _scoped_library("aten", "IMPL") as my_lib:
    my_lib.impl("aten::exp", clipped_exp, "CUDA")

    x = torch.tensor([0.0, 100.0], device="cuda", dtype=torch.float16)
    self.assertEqual(
        torch.exp(x),
        torch.tensor([1.0, 22026.4657948], dtype=torch.float16),
    )
    self.assertTrue(CALLED[0])

# behavior restored after deregistration
self.assertEqual(
    torch.exp(x), torch.tensor([1.0, torch.inf], dtype=torch.float16)
)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_python_dispatch.py
buggy_add
def buggy_add(*args, **kwargs):
    CALLED[0] = True
    return jitted_add(*args, **kwargs)

my_lib = Library("aten", "IMPL")
my_lib.impl('aten::add.Tensor', buggy_add, "CUDA")

x_cpu = torch.rand([3, 3], device='cpu')
y_cpu = torch.rand([3], device='cpu')
x_cuda = x_cpu.cuda()
y_cuda = y_cpu.cuda()

self.assertEqual(x_cuda + y_cuda, x_cpu + y_cpu + 1)
self.assertTrue(CALLED[0])
del my_lib

# behavior restored after deregistration
self.assertEqual(x_cuda + y_cuda, x_cpu + y_cpu)
def buggy_add(*args, **kwargs):
    CALLED[0] = True
    return jitted_add(*args, **kwargs)

with _scoped_library("aten", "IMPL") as my_lib:
    my_lib.impl("aten::add.Tensor", buggy_add, "CUDA")

    x_cpu = torch.rand([3, 3], device="cpu")
    y_cpu = torch.rand([3], device="cpu")
    x_cuda = x_cpu.cuda()
    y_cuda = y_cpu.cuda()

    self.assertEqual(x_cuda + y_cuda, x_cpu + y_cpu + 1)
    self.assertTrue(CALLED[0])

# behavior restored after deregistration
self.assertEqual(x_cuda + y_cuda, x_cpu + y_cpu)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_python_dispatch.py
my_sum
def my_sum(*args, **kwargs):
    run[0] = True
    return args[0]

my_lib1 = Library("aten", "IMPL")
my_lib1.impl('aten::sum', my_sum, "CPU")

x = torch.tensor([1, 2])
self.assertEqual(torch.sum(x), x)
self.assertTrue(run[0])
del my_lib1

# Validate that the old behavior is restored for sum
self.assertEqual(torch.sum(x), torch.tensor(3))
def my_sum(*args, **kwargs):
    run[0] = True
    return args[0].clone()

with _scoped_library("aten", "IMPL") as my_lib1:
    my_lib1.impl("aten::sum", my_sum, "CPU")

    x = torch.tensor([1, 2])
    self.assertEqual(torch.sum(x), x)
    self.assertTrue(run[0])

# Validate that the old behavior is restored for sum
self.assertEqual(torch.sum(x), torch.tensor(3))
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_python_dispatch.py
my_sum_zt
def my_sum_zt(*args, **kwargs):
    if args[0]._is_zerotensor():
        return torch._efficientzerotensor(args[0].shape)
    else:
        return args[0]

y = torch._efficientzerotensor(3)
self.assertTrue(torch.ops.foo.sum(y)._is_zerotensor())
self.assertEqual(torch.ops.foo.sum(x), x)
del my_lib2
del my_lib1
def my_sum_zt(*args, **kwargs):
    if args[0]._is_zerotensor():
        return torch._efficientzerotensor(args[0].shape)
    else:
        return args[0].clone()

y = torch._efficientzerotensor(3)
self.assertTrue(op(y)._is_zerotensor())
self.assertEqual(op(x), x)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_python_dispatch.py
test_create_new_library_fragment_no_existing
x = torch.tensor([1, 2])
self.assertEqual(torch.ops.foo.sum(x), x)
def test_create_new_library_fragment_no_existing(self):
    with _scoped_library(self.test_ns, "FRAGMENT") as my_lib:
        my_lib.define("sum2(Tensor self) -> Tensor")

        @torch.library.impl(my_lib, "sum2", "CPU")
        def my_sum(*args, **kwargs):
            return args[0]

        x = torch.tensor([1, 2])
        self.assertEqual(getattr(torch.ops, self.test_ns).sum2(x), x)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_python_dispatch.py
first_fallback
def first_fallback(keyset, op, *args, **kwargs):
    nonlocal first_called
    if second_called:
        # Recursive call
        first_called = True
        with torch._C._ForceDispatchKeyGuard(
            include_to_set, exclude_to_set | test_keyset
        ):
            return op(*args, **kwargs)
    else:
        # Redispatch down
        keyset = keyset.remove(test_key_first)
        return op.redispatch(keyset, *args, **kwargs)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_python_dispatch.py
second_fallback
# torch.ops.aten.mul.Tensor
my_lib2.impl("aten::mul.Tensor", my_mul, "ZeroTensor")

y = torch._efficientzerotensor(2)
self.assertFalse(torch.mul(x, y)._is_zerotensor())
def second_fallback(op, *args, **kwargs):
    nonlocal second_called
    # Set to avoid infinite recursion
    second_called = True
    # New dispatcher call should hit the first callback again
    self.assertFalse(first_called)
    a, b = args
    # Make a subtraction here instead of add !
    c = a - b
    self.assertTrue(first_called)
    return c

my_lib.fallback(first_fallback, test_key_first, with_keyset=True)
my_lib.fallback(second_fallback, test_key_second)

a, b = torch.rand(2), torch.rand(2)
with torch._C._ForceDispatchKeyGuard(include_to_set, exclude_to_set):
    c = a + b

self.assertEqual(c, a - b)
self.assertTrue(first_called)
self.assertTrue(second_called)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_python_dispatch.py
my_fallback
# Assert that a user can't override the behavior of a (ns, op, dispatch_key)
# combination if someone already overrode the behavior for the same one before them
with self.assertRaisesRegex(RuntimeError, 'already a kernel registered from python'):
    my_lib2.impl(torch.ops.aten.mul.Tensor, my_mul, "ZeroTensor")

del my_lib1

# Validate that lib2 is not affected by removing lib1
self.assertFalse(torch.mul(x, y)._is_zerotensor())
del my_lib2
def my_fallback(op, *args, **kwargs):
    # Disable our handler during checks and generating the output
    with torch._C._ForceDispatchKeyGuard(
        include_to_set, exclude_to_set | test_keyset
    ):
        self.assertIs(op, expected_op)
        self.assertEqual(args, expected_args)
        self.assertEqual(kwargs, expected_kwargs)
        # Return something specific
        return torch.empty(out_shape)

my_lib.fallback(my_fallback, test_key)

a, b = torch.rand(2), torch.rand(2)
with torch._C._ForceDispatchKeyGuard(include_to_set, exclude_to_set):
    # Check a factory function
    expected_op = torch.ops.aten.empty.memory_format
    expected_args = ((2, 2),)
    # Extra kwargs to bypass issues with default args in factory functions
    expected_kwargs = {
        "dtype": torch.float64,
        "pin_memory": False,
        "device": torch.device("cpu"),
    }
    out_shape = (3,)
    out = torch.empty(*expected_args, **expected_kwargs)
    self.assertEqual(out.size(), out_shape)

    # Check a regular function
    expected_op = torch.ops.aten.add.Tensor
    expected_args = (a, b)
    expected_kwargs = {}
    out_shape = (4,)
    out = a + b
    self.assertEqual(out.size(), out_shape)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_python_dispatch.py
test_error_if_fn_not_callable
def test_error_if_fn_not_callable(self):
    with self.assertRaisesRegex(TypeError, "Input function is required to be a callable"):
        my_lib = Library("aten", "IMPL")
        my_lib.impl(torch.ops.aten.neg.default, [], "AutogradCPU")
def test_error_if_fn_not_callable(self):
    with self.assertRaisesRegex(
        TypeError, "Input function is required to be a callable"
    ):
        with _scoped_library("aten", "IMPL") as my_lib:
            my_lib.impl(torch.ops.aten.neg.default, [], "AutogradCPU")
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_python_dispatch.py
test_finalizer
def test_finalizer(self):
    impls_refcnt = sys.getrefcount(torch.library._impls)
    lib = Library(self.test_ns, "FRAGMENT")  # noqa: TOR901
    lib.define("foo123(Tensor x) -> Tensor")

    # 1 for `lib`, 1 for sys.getrefcount
    self.assertEqual(sys.getrefcount(lib), 2)
    # We gained an additional reference that gets cleared when the finalizer runs
    self.assertEqual(sys.getrefcount(torch.library._impls), impls_refcnt + 1)
    # 1 for `lib`
    # 1 for the finalizer
    # 1 for sys.getrefcount
    self.assertEqual(sys.getrefcount(lib._op_impls), 3)

    def foo123(x):
        pass

    lib.impl(f"{self.test_ns}::foo123", foo123, "CPU")
    key = f"{self.test_ns}/foo123/CPU"
    self.assertTrue(key in torch.library._impls)
    saved_op_impls = lib._op_impls

    # del will definitely work if the following passes
    self.assertEqual(sys.getrefcount(lib), 2)
    del lib

    # 1 for saved_op_impls
    # 1 for sys.getrefcount
    # This function should be the last user of lib._op_impls:
    # - lib should not have a reference anymore (it was del'ed)
    # - lib's finalizer should not have a reference anymore
    self.assertEqual(sys.getrefcount(saved_op_impls), 2)
    self.assertTrue(key not in torch.library._impls)

    # lib's finalizer should not have a reference anymore
    self.assertEqual(sys.getrefcount(torch.library._impls), impls_refcnt)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
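The refcount arithmetic in the test above rests on a CPython detail worth spelling out: sys.getrefcount always reports one more reference than the names you can see, because its own argument temporarily holds one. A minimal standalone sketch of that bookkeeping (the Box class is invented for illustration):

import sys

class Box:
    pass

b = Box()
# On CPython: one reference from `b`, one from getrefcount's own argument.
assert sys.getrefcount(b) == 2
alias = b                      # a second name adds one more reference
assert sys.getrefcount(b) == 3
del alias
assert sys.getrefcount(b) == 2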
torch
test/test_python_dispatch.py
foo123
def test_override_cpu_sum(self) -> None:
    # Example 1
    run = [False]
def foo123(x):
    pass

lib.impl(f"{self.test_ns}::foo123", foo123, "CPU")
key = f"{self.test_ns}/foo123/CPU"
self.assertTrue(key in torch.library._impls)
saved_op_impls = lib._op_impls

# del will definitely work if the following passes
self.assertEqual(sys.getrefcount(lib), 2)
del lib

# 1 for saved_op_impls
# 1 for sys.getrefcount
# This function should be the last user of lib._op_impls:
# - lib should not have a reference anymore (it was del'ed)
# - lib's finalizer should not have a reference anymore
self.assertEqual(sys.getrefcount(saved_op_impls), 2)
self.assertTrue(key not in torch.library._impls)

# lib's finalizer should not have a reference anymore
self.assertEqual(sys.getrefcount(torch.library._impls), impls_refcnt)
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_python_dispatch.py
my_sum
def my_sum(*args, **kwargs):
    run[0] = True
    return args[0]

my_lib1 = Library("aten", "IMPL")
my_lib1.impl('aten::sum', my_sum, "CPU")
x = torch.tensor([1, 2])
self.assertEqual(torch.sum(x), x)
self.assertTrue(run[0])
del my_lib1

# Validate that the old behavior is restored for sum
self.assertEqual(torch.sum(x), torch.tensor(3))
def my_sum(*args, **kwargs):
    run[0] = True
    return args[0].clone()

with _scoped_library("aten", "IMPL") as my_lib1:
    my_lib1.impl("aten::sum", my_sum, "CPU")
    x = torch.tensor([1, 2])
    self.assertEqual(torch.sum(x), x)
    self.assertTrue(run[0])

# Validate that the old behavior is restored for sum
self.assertEqual(torch.sum(x), torch.tensor(3))
import tempfile import torch from copy import deepcopy from torch.library import Library from torch.cuda.jiterator import _create_jit_fn import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_ROCM, IS_WINDOWS from torch.utils._mode_utils import no_dispatch, all_same_mode from torch.testing._internal.logging_tensor import LoggingTensor, LoggingTensorReentrant, LoggingTensorMode, \ log_input, capture_logs, capture_logs_with_logging_tensor_mode from torch.utils._pytree import tree_map, tree_map_only from torch.utils._python_dispatch import TorchDispatchMode, _get_current_dispatch_mode, _get_current_dispatch_mode_stack import logging
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
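The before/after pair above swaps manual `del my_lib1` cleanup for the context-manager form, so deregistration happens even if an assertion fails mid-test. A self-contained sketch of that pattern; the namespace "sketch_ns" and op "double" are invented for illustration, and _scoped_library is a private torch.library helper:

import torch
from torch.library import _scoped_library

with _scoped_library("sketch_ns", "DEF") as lib:
    lib.define("double(Tensor x) -> Tensor")

    def double_impl(x):
        return x * 2

    lib.impl("double", double_impl, "CPU")
    out = torch.ops.sketch_ns.double(torch.ones(3))
    assert torch.equal(out, torch.full((3,), 2.0))
# On exit the registrations are torn down, so the override cannot leak
# into later tests.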
torch
test/test_python_dispatch.py
inverted_where
def inverted_where(*args, **kwargs):
    CALLED[0] = True
    return jitted_where(*args, **kwargs)

# overriding where's cuda kernel with Jiterator generated kernel
my_lib = Library("aten", "IMPL")
my_lib.impl('aten::where.self', inverted_where, "CUDA")

device = 'cuda'
cond = torch.tensor([True, True, False], device=device, dtype=torch.bool)
x = torch.tensor([1, 2, 3], device=device)
y = torch.tensor([-1, -2, -3], device=device)

self.assertEqual(torch.where(cond, x, y), torch.tensor([-1, -2, 3]))
self.assertTrue(CALLED[0])
del my_lib

# behavior restored after deregistration
self.assertEqual(torch.where(cond, x, y), torch.tensor([1, 2, -3]))
def inverted_where(*args, **kwargs):
    CALLED[0] = True
    return jitted_where(*args, **kwargs)

# overriding where's cuda kernel with Jiterator generated kernel
with _scoped_library("aten", "IMPL") as my_lib:
    my_lib.impl("aten::where.self", inverted_where, "CUDA")

    device = "cuda"
    cond = torch.tensor([True, True, False], device=device, dtype=torch.bool)
    x = torch.tensor([1, 2, 3], device=device)
    y = torch.tensor([-1, -2, -3], device=device)

    self.assertEqual(torch.where(cond, x, y), torch.tensor([-1, -2, 3]))
    self.assertTrue(CALLED[0])

# behavior restored after deregistration
self.assertEqual(torch.where(cond, x, y), torch.tensor([1, 2, -3]))
import tempfile import torch from copy import deepcopy from torch.library import Library from torch.cuda.jiterator import _create_jit_fn import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_ROCM, IS_WINDOWS from torch.utils._mode_utils import no_dispatch, all_same_mode from torch.testing._internal.logging_tensor import LoggingTensor, LoggingTensorReentrant, LoggingTensorMode, \ log_input, capture_logs, capture_logs_with_logging_tensor_mode from torch.utils._pytree import tree_map, tree_map_only from torch.utils._python_dispatch import TorchDispatchMode, _get_current_dispatch_mode, _get_current_dispatch_mode_stack import logging
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
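The jitted_where referenced above comes from torch.cuda.jiterator, which compiles an elementwise CUDA kernel from a C++ template string at first call. A hedged sketch of how such a kernel is plausibly constructed (assumes a CUDA build; the exact template string here is illustrative, following the documented jiterator convention that the function name in the string becomes the kernel):

import torch
from torch.cuda.jiterator import _create_jit_fn

if torch.cuda.is_available():
    # The negated condition is what makes the overridden torch.where
    # swap its branches in the test above.
    code_string = "template <typename T> T inverted_where(bool cond, T a, T b) { return !cond ? a : b; }"
    jitted_where = _create_jit_fn(code_string)
    cond = torch.tensor([True, False], device="cuda")
    a = torch.tensor([1, 2], device="cuda")
    b = torch.tensor([-1, -2], device="cuda")
    print(jitted_where(cond, a, b))  # tensor([-1, 2], device='cuda:0')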
torch
test/test_python_dispatch.py
f
def f(x, y):
    # Returns (TwoTensor, Tensor)
    return x * y, y + y

x_a = torch.zeros(4)
x_b = torch.zeros(4)
y = torch.ones(4)

# make_fx() is not responsible for unwrapping tensor subclass inputs,
# so we do it manually here.
# Why? In general, make_fx(f)(*args) promises that the graph returned has the same calling
# convention as f(*args). Unwrapping tensor subclass inputs can potentially change
# the number of input args to the graph, breaking that assumption
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_python_dispatch.py
test_register_functional_op_no_returns
def test_register_functional_op_no_returns(self):
    with _scoped_library(self.test_ns, "FRAGMENT") as lib:
        lib.define("foo(Tensor x, Tensor(a!) y, Tensor z, Tensor(b!) w) -> ()")

        def foo_impl(x, y, z, w):
            y.fill_(3.14)
            w.fill_(2.71)

        lib.impl("foo", foo_impl, "CPU")
        register_functional_op(
            lib, "foo_functional", getattr(torch.ops, self.test_ns).foo.default
        )
        x = torch.randn([])
        y = torch.randn([])
        z = torch.randn([])
        w = torch.randn([])
        self._check_is_functional_variant(
            getattr(torch.ops, self.test_ns).foo.default,
            getattr(torch.ops, self.test_ns).foo_functional.default,
            (x, y, z, w),
        )
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only class TestPythonRegistration(TestCase): from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
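register_functional_op (a private torch._custom_op helper used throughout these records) derives a non-mutating twin of a mutable op. The contract it checks can be shown in plain Python; foo and foo_functional below are hand-written stand-ins for illustration, not the generated ops:

import torch

def foo(x, y):                 # mutable: writes into y, returns nothing
    y.fill_(3.14)

def foo_functional(x, y):      # functional twin: returns the new value of y
    return torch.full_like(y, 3.14)

x, y = torch.randn(()), torch.randn(())
y_work = y.clone()
out = foo_functional(x, y_work)
assert torch.equal(y_work, y)     # the functional form mutates nothing
foo(x, y_work)
assert torch.equal(out, y_work)   # ...but returns what the mutation produced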
torch
test/test_python_dispatch.py
foo_impl
def foo_impl(x, y, z, w):
    y.fill_(3.14)
    w.fill_(2.71)

lib.impl("foo", foo_impl, "CPU")
register_functional_op(
    lib, "foo_functional", getattr(torch.ops, self.test_ns).foo.default
)
x = torch.randn([])
y = torch.randn([])
z = torch.randn([])
w = torch.randn([])
self._check_is_functional_variant(
    getattr(torch.ops, self.test_ns).foo.default,
    getattr(torch.ops, self.test_ns).foo_functional.default,
    (x, y, z, w),
)
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_python_dispatch.py
test_register_functional_op_with_optional
def test_register_functional_op_with_optional(self):
    with _scoped_library(self.test_ns, "FRAGMENT") as lib:
        lib.define(
            "foo(Tensor x, Tensor(a!) y, Tensor (b!) z, Tensor(c!)? w) -> ()"
        )

        def foo_impl(x, y, z, w):
            y.fill_(3.14)
            z.fill_(2.71)
            if w is not None:
                w.fill_(1.618)

        lib.impl("foo", foo_impl, "CPU")
        register_functional_op(
            lib, "foo_functional", getattr(torch.ops, self.test_ns).foo.default
        )
        x = torch.randn([])
        y = torch.randn([])
        z = torch.randn([])
        w = torch.randn([])
        self._check_is_functional_variant(
            getattr(torch.ops, self.test_ns).foo.default,
            getattr(torch.ops, self.test_ns).foo_functional.default,
            (x, y, z, w),
        )
        self._check_is_functional_variant(
            getattr(torch.ops, self.test_ns).foo.default,
            getattr(torch.ops, self.test_ns).foo_functional.default,
            (x, y, z, None),
        )
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only class TestPythonRegistration(TestCase): from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_python_dispatch.py
foo_impl
def foo_impl(x, y, z, w):
    y.fill_(3.14)
    w.fill_(2.71)

lib.impl("foo", foo_impl, "CPU")
register_functional_op(
    lib, "foo_functional", getattr(torch.ops, self.test_ns).foo.default
)
x = torch.randn([])
y = torch.randn([])
z = torch.randn([])
w = torch.randn([])
self._check_is_functional_variant(
    getattr(torch.ops, self.test_ns).foo.default,
    getattr(torch.ops, self.test_ns).foo_functional.default,
    (x, y, z, w),
)
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_python_dispatch.py
test_register_functional_op_one_return
def test_register_functional_op_one_return(self):
    with _scoped_library(self.test_ns, "FRAGMENT") as lib:
        lib.define(
            "foo(Tensor x, Tensor(a!) y, Tensor(c!) z, Tensor(b!) w) -> Tensor"
        )

        def foo_impl(x, y, z, w):
            y.fill_(3.14)
            w.fill_(2.71)
            z.fill_(0.99)
            return x.clone()

        lib.impl("foo", foo_impl, "CPU")
        register_functional_op(
            lib, "foo_functional", getattr(torch.ops, self.test_ns).foo.default
        )
        x = torch.randn([])
        y = torch.randn([])
        z = torch.randn([])
        w = torch.randn([])
        self._check_is_functional_variant(
            getattr(torch.ops, self.test_ns).foo.default,
            getattr(torch.ops, self.test_ns).foo_functional.default,
            (x, y, z, w),
        )
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only class TestPythonRegistration(TestCase): from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_python_dispatch.py
foo_impl
def foo_impl(x, y, z, w):
    y.fill_(3.14)
    w.fill_(2.71)

lib.impl("foo", foo_impl, "CPU")
register_functional_op(
    lib, "foo_functional", getattr(torch.ops, self.test_ns).foo.default
)
x = torch.randn([])
y = torch.randn([])
z = torch.randn([])
w = torch.randn([])
self._check_is_functional_variant(
    getattr(torch.ops, self.test_ns).foo.default,
    getattr(torch.ops, self.test_ns).foo_functional.default,
    (x, y, z, w),
)
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_python_dispatch.py
test_register_functional_op_multiple_returns
def test_register_functional_op_multiple_returns(self):
    with _scoped_library(self.test_ns, "FRAGMENT") as lib:
        lib.define(
            "foo(Tensor x, Tensor(a!) y, Tensor z, Tensor(b!) w) -> (Tensor, Tensor)"
        )

        def foo_impl(x, y, z, w):
            y.fill_(3.14)
            w.fill_(2.71)
            return x.clone(), z.clone()

        lib.impl("foo", foo_impl, "CPU")
        register_functional_op(
            lib, "foo_functional", getattr(torch.ops, self.test_ns).foo.default
        )
        x = torch.randn([])
        y = torch.randn([])
        z = torch.randn([])
        w = torch.randn([])
        self._check_is_functional_variant(
            getattr(torch.ops, self.test_ns).foo.default,
            getattr(torch.ops, self.test_ns).foo_functional.default,
            (x, y, z, w),
        )
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only class TestPythonRegistration(TestCase): from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_python_dispatch.py
foo_impl
def foo_impl(x, y, z, w):
    y.fill_(3.14)
    w.fill_(2.71)

lib.impl("foo", foo_impl, "CPU")
register_functional_op(
    lib, "foo_functional", getattr(torch.ops, self.test_ns).foo.default
)
x = torch.randn([])
y = torch.randn([])
z = torch.randn([])
w = torch.randn([])
self._check_is_functional_variant(
    getattr(torch.ops, self.test_ns).foo.default,
    getattr(torch.ops, self.test_ns).foo_functional.default,
    (x, y, z, w),
)
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_python_dispatch.py
test_notimplemented_mode
def test_notimplemented_mode(self):
    sub_count = 0

    class PoliteMode(TorchDispatchMode):
        def __init__(self):
            self.pre_count = 0
            self.post_count = 0

        def __torch_dispatch__(self, func, types, args=(), kwargs=None):
            self.pre_count += 1
            if any(t is not torch.Tensor for t in types):
                return NotImplemented
            self.post_count += 1
            return func(*args, **kwargs)

    class SubTensor(torch.Tensor):
        def __new__(cls, elem):
            r = torch.Tensor._make_wrapper_subclass(cls, elem.shape)
            r.elem = elem
            return r

        @classmethod
        def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
            nonlocal sub_count
            sub_count += 1

            def unwrap(t):
                if isinstance(t, SubTensor):
                    return t.elem
                else:
                    return t

            return func(*tree_map(unwrap, args), **tree_map(unwrap, kwargs))

        __torch_function__ = torch._C._disabled_torch_function_impl

    a = SubTensor(torch.randn(2))
    with PoliteMode() as mode:
        a.abs()

    self.assertEqual(mode.pre_count, 2)
    self.assertEqual(mode.post_count, 1)
    self.assertEqual(sub_count, 1)

    # make sure this doesn't error
    with PoliteMode():
        with PoliteMode():
            a.abs()
def test_notimplemented_mode(self):
    sub_count = 0

    class PoliteMode(TorchDispatchMode):
        def __init__(self) -> None:
            self.pre_count = 0
            self.post_count = 0

        def __torch_dispatch__(self, func, types, args=(), kwargs=None):
            self.pre_count += 1
            if any(t is not torch.Tensor for t in types):
                return NotImplemented
            self.post_count += 1
            return func(*args, **kwargs)

    class SubTensor(torch.Tensor):
        def __new__(cls, elem):
            r = torch.Tensor._make_wrapper_subclass(cls, elem.shape)
            r.elem = elem
            return r

        @classmethod
        def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
            nonlocal sub_count
            sub_count += 1

            def unwrap(t):
                if isinstance(t, SubTensor):
                    return t.elem
                else:
                    return t

            return func(*tree_map(unwrap, args), **tree_map(unwrap, kwargs))

    a = SubTensor(torch.randn(2))
    with PoliteMode() as mode:
        a.abs()

    self.assertEqual(mode.pre_count, 2)
    self.assertEqual(mode.post_count, 1)
    self.assertEqual(sub_count, 1)

    # make sure this doesn't error
    with PoliteMode():
        with PoliteMode():
            a.abs()
import tempfile import torch from copy import deepcopy from torch.library import Library from torch.cuda.jiterator import _create_jit_fn import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_ROCM, IS_WINDOWS from torch.utils._mode_utils import no_dispatch, all_same_mode from torch.testing._internal.logging_tensor import LoggingTensor, LoggingTensorReentrant, LoggingTensorMode, \ log_input, capture_logs, capture_logs_with_logging_tensor_mode from torch.utils._pytree import tree_map, tree_map_only from torch.utils._python_dispatch import TorchDispatchMode, _get_current_dispatch_mode, _get_current_dispatch_mode_stack import logging class TestPythonDispatch(TestCase):
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only class TestPythonDispatch(TestCase): from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
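The NotImplemented return in PoliteMode is the hand-off protocol: when a mode declines a call, dispatch retries with the next handler, here the tensor subclass's own __torch_dispatch__. A minimal sketch of a declining mode in isolation (the class name is invented):

import torch
from torch.utils._python_dispatch import TorchDispatchMode

class DeclineSubclasses(TorchDispatchMode):
    def __torch_dispatch__(self, func, types, args=(), kwargs=None):
        # Declining hands the op to the subclass instead of raising.
        if any(t is not torch.Tensor for t in types):
            return NotImplemented
        return func(*args, **(kwargs or {}))

with DeclineSubclasses():
    torch.ones(2).abs()   # plain tensors: handled by the mode itself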
torch
test/test_python_dispatch.py
__init__
def __init__(self, msg):
    return super().__init__(msg)
def __init__(self, msg):
    super().__init__(msg)
import tempfile import torch from copy import deepcopy from torch.library import Library from torch.cuda.jiterator import _create_jit_fn import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_ROCM, IS_WINDOWS from torch.utils._mode_utils import no_dispatch, all_same_mode from torch.testing._internal.logging_tensor import LoggingTensor, LoggingTensorReentrant, LoggingTensorMode, \ log_input, capture_logs, capture_logs_with_logging_tensor_mode from torch.utils._pytree import tree_map, tree_map_only from torch.utils._python_dispatch import TorchDispatchMode, _get_current_dispatch_mode, _get_current_dispatch_mode_stack import logging class ErrorA(RuntimeError):
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only class ErrorA(RuntimeError): from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_python_dispatch.py
my_sum4
y = torch._efficientzerotensor(3)
self.assertTrue(torch.ops.foo.sum(y)._is_zerotensor())
self.assertEqual(torch.ops.foo.sum(x), x)
del my_lib2
del my_lib1
def my_sum4(*args, **kwargs):
    return args[0]

x = torch.tensor([1, 2])
self.assertEqual(getattr(torch.ops, self.test_ns).sum4(x), x)

# Create another fragment
with _scoped_library(self.test_ns, "FRAGMENT") as my_lib3:
    my_lib3.define("sum3(Tensor self) -> Tensor")

    @torch.library.impl(my_lib3, "sum3", "CPU")
    def my_sum3(*args, **kwargs):
        return args[0]

    x = torch.tensor([1, 2])
    self.assertEqual(getattr(torch.ops, self.test_ns).sum3(x), x)
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
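The "FRAGMENT" kind used above lets several Library handles contribute ops to one namespace without redefining it. A sketch of that layering; the namespace "sketch_frag" and both ops are invented for illustration:

import torch
from torch.library import _scoped_library

with _scoped_library("sketch_frag", "DEF") as base:
    base.define("inc(Tensor x) -> Tensor")
    base.impl("inc", lambda x: x + 1, "CPU")
    # A second handle extends the same namespace rather than clobbering it.
    with _scoped_library("sketch_frag", "FRAGMENT") as frag:
        frag.define("dec(Tensor x) -> Tensor")
        frag.impl("dec", lambda x: x - 1, "CPU")
        t = torch.zeros(1)
        assert torch.ops.sketch_frag.inc(t).item() == 1.0
        assert torch.ops.sketch_frag.dec(t).item() == -1.0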
torch
test/test_python_dispatch.py
test_helper
def test_helper(alias_analysis=""):
    my_lib1 = Library("foo", "DEF")

    called = [0]

    @torch.library.define(my_lib1, "_op() -> None", alias_analysis=alias_analysis)
    def _op(*args, **kwargs):
        called[0] += 1

    @torch.jit.script
    def _test():
        torch.ops.foo._op()

    assert "foo::_op" in str(_test.graph)

with self.assertRaises(AssertionError):
    test_helper("")  # alias_analysis="FROM_SCHEMA"

test_helper("CONSERVATIVE")
def test_helper(alias_analysis=""):
    my_lib1 = Library(self.test_ns, "DEF")  # noqa: TOR901

    called = [0]

    @torch.library.define(
        my_lib1, "_op() -> None", alias_analysis=alias_analysis
    )
    def _op(*args, **kwargs):
        called[0] += 1

    @torch.jit.script
    def _test():
        torch.ops._test_python_registration._op()

    assert "_test_python_registration::_op" in str(_test.graph)

with self.assertRaises(AssertionError):
    test_helper("")  # alias_analysis="FROM_SCHEMA"

test_helper("CONSERVATIVE")
import tempfile import torch from copy import deepcopy from torch.library import Library from torch.cuda.jiterator import _create_jit_fn import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_ROCM, IS_WINDOWS from torch.utils._mode_utils import no_dispatch, all_same_mode from torch.testing._internal.logging_tensor import LoggingTensor, LoggingTensorReentrant, LoggingTensorMode, \ log_input, capture_logs, capture_logs_with_logging_tensor_mode from torch.utils._pytree import tree_map, tree_map_only from torch.utils._python_dispatch import TorchDispatchMode, _get_current_dispatch_mode, _get_current_dispatch_mode_stack import logging
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
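Why the empty alias_analysis must fail: with the schema-derived default, a no-arg, no-return op looks pure to TorchScript, so the scripted graph can plausibly drop the call as dead code and the "foo::_op" assertion fails; "CONSERVATIVE" marks the op as side-effecting so it survives. A sketch of the same decorator form of torch.library.define used above, in an invented namespace:

import torch
from torch.library import _scoped_library

with _scoped_library("sketch_alias", "DEF") as lib:
    calls = []

    @torch.library.define(lib, "mark() -> None", alias_analysis="CONSERVATIVE")
    def mark():
        calls.append(1)

    torch.ops.sketch_alias.mark()   # eager call goes through the registration
    assert calls == [1]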
torch
test/test_python_dispatch.py
_test
def _test():
    torch.ops.foo._op()

assert "foo::_op" in str(_test.graph)
def _test():
    torch.ops._test_python_registration._op()

assert "_test_python_registration::_op" in str(_test.graph)
import tempfile import torch from copy import deepcopy from torch.library import Library from torch.cuda.jiterator import _create_jit_fn import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_ROCM, IS_WINDOWS from torch.utils._mode_utils import no_dispatch, all_same_mode from torch.testing._internal.logging_tensor import LoggingTensor, LoggingTensorReentrant, LoggingTensorMode, \ log_input, capture_logs, capture_logs_with_logging_tensor_mode from torch.utils._pytree import tree_map, tree_map_only from torch.utils._python_dispatch import TorchDispatchMode, _get_current_dispatch_mode, _get_current_dispatch_mode_stack import logging
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_python_dispatch.py
sqsum
def sqsum(a: SymInt, b: SymInt):
    return a * a + b * b

out = getattr(torch.ops, self.test_ns).sqsum.default(s0, s1)
out_val = shape_env.evaluate_expr(out.node.expr)
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
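The registered kernel above is ordinary Python arithmetic, so it behaves identically on concrete ints and on the SymInts (s0, s1) a ShapeEnv hands it; a quick concrete-value sanity check of the formula:

def sqsum(a, b):
    return a * a + b * b

# Matches the symbolic result once s0=3, s1=4 are bound.
assert sqsum(3, 4) == 25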
torch
test/test_python_dispatch.py
test_register_functional_op_error_cases
def test_register_functional_op_error_cases(self):
    with _scoped_library(self.test_ns, "FRAGMENT") as lib:
        with self.assertRaisesRegex(TypeError, "instance of OpOverload"):
            register_functional_op(lib, "abs", torch.ops.aten.abs_)
        with self.assertRaisesRegex(RuntimeError, "Expected op to be mutable"):
            register_functional_op(lib, "abs", torch.ops.aten.abs_.default)
        with self.assertRaisesRegex(RuntimeError, "Expected op to be mutable"):
            register_functional_op(lib, "abs", torch.ops.aten.abs.out)
        schemas = [
            "foo(Tensor x, Tensor(a!)[] y) -> ()",
            "foo(Tensor x, Tensor(a!) y, Tensor(b) z) -> Tensor(b)",
            "foo(Tensor x, Tensor(a!) y) -> (Tensor, Tensor(a))",
        ]
    for schema in schemas:
        with _scoped_library(self.test_ns, "FRAGMENT") as lib:
            lib.define(schema)
            with self.assertRaisesRegex(RuntimeError, "NYI"):
                register_functional_op(
                    lib,
                    "foo_functional",
                    getattr(torch.ops, self.test_ns).foo.default,
                )
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only class TestPythonRegistration(TestCase): from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_python_dispatch.py
_check_is_functional_variant
def _check_is_functional_variant(self, mutable_op, functional_op, args):
    # functional op should not mutate
    cloned_args = pytree.tree_map_only(torch.Tensor, torch.clone, args)
    functional_result = functional_op(*cloned_args)
    self.assertEqual(cloned_args, args)

    # check functional_result includes mutable_result
    mutable_result = mutable_op(*cloned_args)
    if mutable_result is None:
        flat_mutable_result = []
    else:
        flat_mutable_result = pytree.tree_leaves(mutable_result)
    flat_functional_result = pytree.tree_leaves(functional_result)
    assert len(flat_functional_result) > len(flat_mutable_result)
    self.assertEqual(
        flat_functional_result[: len(flat_mutable_result)], flat_mutable_result
    )

    # check rest of functional_result is the mutated args
    mutated_args = [
        maybe_mutated_arg
        for maybe_mutated_arg, arg in zip(cloned_args, args)
        if not (
            maybe_mutated_arg is not None
            and arg is not None
            and torch.allclose(maybe_mutated_arg, arg)
        )
    ]
    self.assertEqual(
        flat_functional_result[len(flat_mutable_result) :], mutated_args
    )

    # check that functionalization kernel was indeed registered
    def fn(*args):
        cloned_args = pytree.tree_map_only(torch.Tensor, torch.clone, args)
        mutable_op(*cloned_args)
        return cloned_args

    gm = make_fx(torch.func.functionalize(fn))(*args)
    has_functional_op = False
    for node in gm.graph.nodes:
        self.assertFalse(node.target is mutable_op)
        if node.target is functional_op:
            has_functional_op = True
    self.assertTrue(has_functional_op)
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only class TestPythonRegistration(TestCase): from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
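The helper above leans on two pytree utilities: tree_leaves flattens arbitrary nesting into a flat list, and tree_map_only applies a function only to leaves of a given type. A small sketch of both:

import torch
from torch.utils import _pytree as pytree

nested = ((1, 2), [3, {"k": 4}])
assert pytree.tree_leaves(nested) == [1, 2, 3, 4]

args = (torch.ones(2), "tag", torch.zeros(1))
cloned = pytree.tree_map_only(torch.Tensor, torch.clone, args)
assert cloned[1] == "tag"   # non-tensor leaves pass through untouched
assert torch.equal(cloned[0], args[0]) and cloned[0] is not args[0]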
torch
test/test_python_dispatch.py
fn
def fn(*args):
    cloned_args = pytree.tree_map_only(torch.Tensor, torch.clone, args)
    mutable_op(*cloned_args)
    return cloned_args

gm = make_fx(torch.func.functionalize(fn))(*args)
has_functional_op = False
for node in gm.graph.nodes:
    self.assertFalse(node.target is mutable_op)
    if node.target is functional_op:
        has_functional_op = True
self.assertTrue(has_functional_op)
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
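The same tracing pattern outside the test harness: functionalize plus make_fx turns a mutating function into a graph containing only out-of-place ops, which is how the helper detects that the functional kernel was actually registered. A minimal sketch:

import torch
from torch.fx.experimental.proxy_tensor import make_fx

def f(x):
    y = x.clone()
    y.add_(1)          # in-place op in the eager function
    return y

gm = make_fx(torch.func.functionalize(f))(torch.zeros(2))
# After functionalization the traced graph uses aten.add, not aten.add_.
assert all(n.target is not torch.ops.aten.add_.default for n in gm.graph.nodes)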
torch
test/test_python_dispatch.py
test_exception_handling
def test_exception_handling(self):
    class A(torch.Tensor):
        @staticmethod
        def __new__(cls, elem):
            return torch.Tensor._make_subclass(cls, elem, elem.requires_grad)

    class AMode(TorchDispatchMode):
        def __torch_dispatch__(self, func, types, args=(), kwargs=None):
            if func.__name__ == 'randn.default':
                raise RuntimeError()
            return A(torch.zeros(()))

    with AMode():
        try:
            torch.randn(())
        except RuntimeError:
            pass
        self.assertTrue(isinstance(torch.zeros(()), A))
def test_exception_handling(self):
    class A(torch.Tensor):
        @staticmethod
        def __new__(cls, elem):
            return torch.Tensor._make_subclass(cls, elem, elem.requires_grad)

    class AMode(TorchDispatchMode):
        def __torch_dispatch__(self, func, types, args=(), kwargs=None):
            if func.__name__ == "randn.default":
                raise RuntimeError
            return A(torch.zeros(()))

    with AMode():
        try:
            torch.randn(())
        except RuntimeError:
            pass
        self.assertTrue(isinstance(torch.zeros(()), A))
import tempfile import torch from copy import deepcopy from torch.library import Library from torch.cuda.jiterator import _create_jit_fn import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_ROCM, IS_WINDOWS from torch.utils._mode_utils import no_dispatch, all_same_mode from torch.testing._internal.logging_tensor import LoggingTensor, LoggingTensorReentrant, LoggingTensorMode, \ log_input, capture_logs, capture_logs_with_logging_tensor_mode from torch.utils._pytree import tree_map, tree_map_only from torch.utils._python_dispatch import TorchDispatchMode, _get_current_dispatch_mode, _get_current_dispatch_mode_stack import logging class TestPythonDispatch(TestCase):
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only class TestPythonDispatch(TestCase): from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
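For reference, a minimal sketch of the interception pattern `test_exception_handling` relies on: a `TorchDispatchMode` sees every aten call made inside its `with` block and can either handle it or redispatch (`NoisyMode` is an illustrative name, not part of the suite):

```python
import torch
from torch.utils._python_dispatch import TorchDispatchMode

class NoisyMode(TorchDispatchMode):
    def __torch_dispatch__(self, func, types, args=(), kwargs=None):
        print(f"intercepted {func}")          # e.g. aten.randn.default
        return func(*args, **(kwargs or {}))  # fall through to the real kernel

with NoisyMode():
    torch.randn(2)
```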
torch
test/test_python_dispatch.py
test_with_mode_created_separately
def test_with_mode_created_separately(self): class ErrorA(RuntimeError): pass class A(TorchDispatchMode): def __torch_dispatch__(self, func, types, args=(), kwargs=None): raise ErrorA() x = A() with self.assertRaises(ErrorA): with x: torch.empty([])
def test_with_mode_created_separately(self): class ErrorA(RuntimeError): pass class A(TorchDispatchMode): def __torch_dispatch__(self, func, types, args=(), kwargs=None): raise ErrorA x = A() with self.assertRaises(ErrorA): with x: torch.empty([])
import tempfile import torch from copy import deepcopy from torch.library import Library from torch.cuda.jiterator import _create_jit_fn import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_ROCM, IS_WINDOWS from torch.utils._mode_utils import no_dispatch, all_same_mode from torch.testing._internal.logging_tensor import LoggingTensor, LoggingTensorReentrant, LoggingTensorMode, \ log_input, capture_logs, capture_logs_with_logging_tensor_mode from torch.utils._pytree import tree_map, tree_map_only from torch.utils._python_dispatch import TorchDispatchMode, _get_current_dispatch_mode, _get_current_dispatch_mode_stack import logging class TestPythonDispatch(TestCase):
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only class TestPythonDispatch(TestCase): from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_python_dispatch.py
test_with_nested_modes
def test_with_nested_modes(self): class ErrorA(RuntimeError): def __init__(self, msg): return super().__init__(msg) class A(TorchDispatchMode): def __init__(self, msg): self.msg = msg def __torch_dispatch__(self, func, types, args=(), kwargs=None): raise ErrorA(self.msg) with self.assertRaisesRegex(ErrorA, "layer2"): with A("layer1"): with A("layer2"): torch.empty([])
def test_with_nested_modes(self): class ErrorA(RuntimeError): def __init__(self, msg): super().__init__(msg) class A(TorchDispatchMode): def __init__(self, msg): self.msg = msg def __torch_dispatch__(self, func, types, args=(), kwargs=None): raise ErrorA(self.msg) with self.assertRaisesRegex(ErrorA, "layer2"): with A("layer1"): with A("layer2"): torch.empty([])
import tempfile import torch from copy import deepcopy from torch.library import Library from torch.cuda.jiterator import _create_jit_fn import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_ROCM, IS_WINDOWS from torch.utils._mode_utils import no_dispatch, all_same_mode from torch.testing._internal.logging_tensor import LoggingTensor, LoggingTensorReentrant, LoggingTensorMode, \ log_input, capture_logs, capture_logs_with_logging_tensor_mode from torch.utils._pytree import tree_map, tree_map_only from torch.utils._python_dispatch import TorchDispatchMode, _get_current_dispatch_mode, _get_current_dispatch_mode_stack import logging class TestPythonDispatch(TestCase):
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only class TestPythonDispatch(TestCase): from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
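A sketch of why the nested-mode test expects the "layer2" error: the innermost active mode sees each call first, and outer modes only run if the inner one redispatches (the class and names here are illustrative):

```python
import torch
from torch.utils._python_dispatch import TorchDispatchMode

class Layer(TorchDispatchMode):
    def __init__(self, name):
        self.name = name

    def __torch_dispatch__(self, func, types, args=(), kwargs=None):
        print(f"{self.name} saw {func}")
        return func(*args, **(kwargs or {}))  # hand off to the next mode out

with Layer("layer1"), Layer("layer2"):
    torch.empty([])  # prints "layer2 saw ..." before "layer1 saw ..."
```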
torch
test/test_python_dispatch.py
__init__
def __init__(self, msg): return super().__init__(msg)
def __init__(self, msg): super().__init__(msg)
import tempfile import torch from copy import deepcopy from torch.library import Library from torch.cuda.jiterator import _create_jit_fn import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_ROCM, IS_WINDOWS from torch.utils._mode_utils import no_dispatch, all_same_mode from torch.testing._internal.logging_tensor import LoggingTensor, LoggingTensorReentrant, LoggingTensorMode, \ log_input, capture_logs, capture_logs_with_logging_tensor_mode from torch.utils._pytree import tree_map, tree_map_only from torch.utils._python_dispatch import TorchDispatchMode, _get_current_dispatch_mode, _get_current_dispatch_mode_stack import logging class ErrorA(RuntimeError):
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only class ErrorA(RuntimeError): from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_python_dispatch.py
__init__
def __init__(self, msg): return super().__init__(msg)
def __init__(self, msg): super().__init__(msg)
import tempfile import torch from copy import deepcopy from torch.library import Library from torch.cuda.jiterator import _create_jit_fn import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_ROCM, IS_WINDOWS from torch.utils._mode_utils import no_dispatch, all_same_mode from torch.testing._internal.logging_tensor import LoggingTensor, LoggingTensorReentrant, LoggingTensorMode, \ log_input, capture_logs, capture_logs_with_logging_tensor_mode from torch.utils._pytree import tree_map, tree_map_only from torch.utils._python_dispatch import TorchDispatchMode, _get_current_dispatch_mode, _get_current_dispatch_mode_stack import logging class ErrorA(RuntimeError):
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only class ErrorA(RuntimeError): from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_python_dispatch.py
test_mode_detection
def test_mode_detection(self): class InfraMode(TorchDispatchMode): @classmethod def is_infra_mode(cls): return True class NonInfraMode(TorchDispatchMode): pass with InfraMode(): self.assertTrue(is_in_torch_dispatch_mode()) self.assertFalse(is_in_torch_dispatch_mode(include_infra_modes=False)) with NonInfraMode(): self.assertTrue(is_in_torch_dispatch_mode()) self.assertTrue(is_in_torch_dispatch_mode(include_infra_modes=False)) with InfraMode(): self.assertTrue(is_in_torch_dispatch_mode()) self.assertTrue( is_in_torch_dispatch_mode(include_infra_modes=False) ) self.assertTrue(is_in_torch_dispatch_mode()) self.assertTrue(is_in_torch_dispatch_mode(include_infra_modes=False)) self.assertTrue(is_in_torch_dispatch_mode()) self.assertFalse(is_in_torch_dispatch_mode(include_infra_modes=False)) self.assertFalse(is_in_torch_dispatch_mode()) self.assertFalse(is_in_torch_dispatch_mode(include_infra_modes=False))
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only class TestPythonDispatch(TestCase): from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
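The added `test_mode_detection` distinguishes infra modes from ordinary ones via `is_in_torch_dispatch_mode`. A condensed sketch of the API it exercises (`Infra` is an illustrative name):

```python
import torch
from torch.utils._python_dispatch import (
    TorchDispatchMode,
    is_in_torch_dispatch_mode,
)

class Infra(TorchDispatchMode):
    @classmethod
    def is_infra_mode(cls):
        return True  # marks the mode as infrastructure

    def __torch_dispatch__(self, func, types, args=(), kwargs=None):
        return func(*args, **(kwargs or {}))

assert not is_in_torch_dispatch_mode()
with Infra():
    assert is_in_torch_dispatch_mode()
    assert not is_in_torch_dispatch_mode(include_infra_modes=False)
```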
torch
test/test_python_dispatch.py
is_infra_mode
def test_tolist_numpy_with_torch_dispatch_mode(self) -> None: x = LoggingTensor(torch.tensor([2.0, 3.0])) with self.assertRaisesRegex(RuntimeError, "is not supported for tensor subclasses."): x.tolist() with self.assertRaisesRegex(RuntimeError, "is not supported for tensor subclasses."): x.numpy() with self.assertRaises(AssertionError): self.assertEqual(x, None)
def is_infra_mode(cls): return True
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only class InfraMode(TorchDispatchMode): from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_python_dispatch.py
__init__
def __init__(self, msg): return super().__init__(msg)
def __init__(self, msg): super().__init__(msg)
import tempfile import torch from copy import deepcopy from torch.library import Library from torch.cuda.jiterator import _create_jit_fn import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_ROCM, IS_WINDOWS from torch.utils._mode_utils import no_dispatch, all_same_mode from torch.testing._internal.logging_tensor import LoggingTensor, LoggingTensorReentrant, LoggingTensorMode, \ log_input, capture_logs, capture_logs_with_logging_tensor_mode from torch.utils._pytree import tree_map, tree_map_only from torch.utils._python_dispatch import TorchDispatchMode, _get_current_dispatch_mode, _get_current_dispatch_mode_stack import logging class ErrorA(RuntimeError):
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only class ErrorA(RuntimeError): from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_python_dispatch.py
test_dispatch_super_call
def test_dispatch_super_call(self): called = [] class SubTensor(torch.Tensor): @staticmethod def __new__(cls, elem): return torch.Tensor._make_subclass(cls, elem) __torch_function__ = torch._C._disabled_torch_function_impl @classmethod def __torch_dispatch__(cls, func, types, args=(), kwargs=None): called.append(func) return super().__torch_dispatch__(func, types, args, kwargs) x = torch.randn(2) y = torch.randn(2) self.assertEqual(SubTensor(x) + SubTensor(y), x + y) self.assertEqual(called, [torch.ops.aten.add.Tensor])
def test_dispatch_super_call(self): called = [] class SubTensor(torch.Tensor): @staticmethod def __new__(cls, elem): return torch.Tensor._make_subclass(cls, elem) @classmethod def __torch_dispatch__(cls, func, types, args=(), kwargs=None): called.append(func) return super().__torch_dispatch__(func, types, args, kwargs) x = torch.randn(2) y = torch.randn(2) self.assertEqual(SubTensor(x) + SubTensor(y), x + y) self.assertEqual(called, [torch.ops.aten.add.Tensor])
import tempfile import torch from copy import deepcopy from torch.library import Library from torch.cuda.jiterator import _create_jit_fn import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_ROCM, IS_WINDOWS from torch.utils._mode_utils import no_dispatch, all_same_mode from torch.testing._internal.logging_tensor import LoggingTensor, LoggingTensorReentrant, LoggingTensorMode, \ log_input, capture_logs, capture_logs_with_logging_tensor_mode from torch.utils._pytree import tree_map, tree_map_only from torch.utils._python_dispatch import TorchDispatchMode, _get_current_dispatch_mode, _get_current_dispatch_mode_stack import logging class TestPythonDispatch(TestCase):
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only class TestPythonDispatch(TestCase): from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
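This change (and the similar ones below) drops the explicit `__torch_function__ = torch._C._disabled_torch_function_impl` line from the subclass. A sketch of the remaining pattern, where the subclass logs each aten op and defers to the default handling via `super().__torch_dispatch__` (`TracedTensor` is an illustrative name):

```python
import torch

class TracedTensor(torch.Tensor):
    @staticmethod
    def __new__(cls, elem):
        return torch.Tensor._make_subclass(cls, elem)

    @classmethod
    def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
        print("dispatch:", func)  # e.g. aten.add.Tensor
        return super().__torch_dispatch__(func, types, args, kwargs)

out = TracedTensor(torch.randn(2)) + TracedTensor(torch.randn(2))
```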
torch
test/test_python_dispatch.py
test_dispatch_super_call_list_arg
def test_dispatch_super_call_list_arg(self): called = [] class SubTensorWithListArg(torch.Tensor): @staticmethod def __new__(cls, elem): return torch.Tensor._make_subclass(cls, elem) __torch_function__ = torch._C._disabled_torch_function_impl @classmethod def __torch_dispatch__(cls, func, types, args=(), kwargs=None): called.append(func) return super().__torch_dispatch__(func, types, list(args), kwargs) x = torch.randn(2) self.assertEqual(SubTensorWithListArg(x).neg(), x.neg()) self.assertEqual(called, [torch.ops.aten.neg.default])
def test_dispatch_super_call_list_arg(self): called = [] class SubTensorWithListArg(torch.Tensor): @staticmethod def __new__(cls, elem): return torch.Tensor._make_subclass(cls, elem) @classmethod def __torch_dispatch__(cls, func, types, args=(), kwargs=None): called.append(func) return super().__torch_dispatch__(func, types, list(args), kwargs) x = torch.randn(2) self.assertEqual(SubTensorWithListArg(x).neg(), x.neg()) self.assertEqual(called, [torch.ops.aten.neg.default])
import tempfile import torch from copy import deepcopy from torch.library import Library from torch.cuda.jiterator import _create_jit_fn import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_ROCM, IS_WINDOWS from torch.utils._mode_utils import no_dispatch, all_same_mode from torch.testing._internal.logging_tensor import LoggingTensor, LoggingTensorReentrant, LoggingTensorMode, \ log_input, capture_logs, capture_logs_with_logging_tensor_mode from torch.utils._pytree import tree_map, tree_map_only from torch.utils._python_dispatch import TorchDispatchMode, _get_current_dispatch_mode, _get_current_dispatch_mode_stack import logging class TestPythonDispatch(TestCase):
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only class TestPythonDispatch(TestCase): from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_python_dispatch.py
test_dispatch_super_dont_autograd
def test_dispatch_super_dont_autograd(self): called = [] class SubTensor(torch.Tensor): @staticmethod def __new__(cls, elem): return torch.Tensor._make_subclass(cls, elem, elem.requires_grad) __torch_function__ = torch._C._disabled_torch_function_impl @classmethod def __torch_dispatch__(cls, func, types, args=(), kwargs=None): called.append(func) # This argument still requires grad because it was passed # through directly... self.assertTrue(args[0].requires_grad) r = super().__torch_dispatch__(func, types, args, kwargs) # But the output better not require grad, because that means # you did autograd again in torch dispatch (oops) self.assertFalse(r.requires_grad) return r x = SubTensor(torch.randn(2, requires_grad=True)) x.neg() self.assertEqual(called, [torch.ops.aten.neg.default])
def test_dispatch_super_dont_autograd(self): called = [] class SubTensor(torch.Tensor): @staticmethod def __new__(cls, elem): return torch.Tensor._make_subclass(cls, elem, elem.requires_grad) @classmethod def __torch_dispatch__(cls, func, types, args=(), kwargs=None): called.append(func) # This argument still requires grad because it was passed # through directly... self.assertTrue(args[0].requires_grad) r = super().__torch_dispatch__(func, types, args, kwargs) # But the output better not require grad, because that means # you did autograd again in torch dispatch (oops) self.assertFalse(r.requires_grad) return r x = SubTensor(torch.randn(2, requires_grad=True)) x.neg() self.assertEqual(called, [torch.ops.aten.neg.default])
import tempfile import torch from copy import deepcopy from torch.library import Library from torch.cuda.jiterator import _create_jit_fn import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_ROCM, IS_WINDOWS from torch.utils._mode_utils import no_dispatch, all_same_mode from torch.testing._internal.logging_tensor import LoggingTensor, LoggingTensorReentrant, LoggingTensorMode, \ log_input, capture_logs, capture_logs_with_logging_tensor_mode from torch.utils._pytree import tree_map, tree_map_only from torch.utils._python_dispatch import TorchDispatchMode, _get_current_dispatch_mode, _get_current_dispatch_mode_stack import logging class TestPythonDispatch(TestCase):
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only class TestPythonDispatch(TestCase): from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_python_dispatch.py
test_dim_slowpath
def test_dim_slowpath(self): data = torch.randn(3, 3) for use_wrapper_subclass in [True, False]: class DimNotImplementedTensor(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_sizes_strides_policy="sizes") @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): return NotImplemented class DimImplementedTensor(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_sizes_strides_policy="sizes") @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): if func.overloadpacket == torch.ops.aten.dim: return data.dim() return NotImplemented err_msg = "no implementation found for 'torch.ops.aten.dim'" e = DimNotImplementedTensor(torch.randn(3, 3), use_wrapper_subclass) with self.assertRaisesRegex(TypeError, err_msg): e.dim() t = DimImplementedTensor(torch.randn(3, 3), use_wrapper_subclass) self.assertEqual(t.dim(), 2)
def test_dim_slowpath(self): data = torch.randn(3, 3) for use_wrapper_subclass in [True, False]: class DimNotImplementedTensor(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): return TestPythonDispatch.subclass_helper( cls, data, wrapper, dispatch_sizes_strides_policy="sizes" ) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): return NotImplemented class DimImplementedTensor(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): return TestPythonDispatch.subclass_helper( cls, data, wrapper, dispatch_sizes_strides_policy="sizes" ) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): if func.overloadpacket == torch.ops.aten.dim: return data.dim() return NotImplemented err_msg = "Multiple dispatch failed for 'torch.ops.aten.dim'" e = DimNotImplementedTensor(torch.randn(3, 3), use_wrapper_subclass) with self.assertRaisesRegex(TypeError, err_msg): e.dim() t = DimImplementedTensor(torch.randn(3, 3), use_wrapper_subclass) self.assertEqual(t.dim(), 2)
import tempfile import torch from copy import deepcopy from torch.library import Library from torch.cuda.jiterator import _create_jit_fn import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_ROCM, IS_WINDOWS from torch.utils._mode_utils import no_dispatch, all_same_mode from torch.testing._internal.logging_tensor import LoggingTensor, LoggingTensorReentrant, LoggingTensorMode, \ log_input, capture_logs, capture_logs_with_logging_tensor_mode from torch.utils._pytree import tree_map, tree_map_only from torch.utils._python_dispatch import TorchDispatchMode, _get_current_dispatch_mode, _get_current_dispatch_mode_stack import logging class TestPythonDispatch(TestCase):
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only class TestPythonDispatch(TestCase): from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_python_dispatch.py
test_set_data
def test_set_data(self): called = 0 class SubTensor(torch.Tensor): __torch_function__ = torch._C._disabled_torch_function_impl @classmethod def __torch_dispatch__(cls, func, types, args=(), kwargs=None): nonlocal called called += 1 return super().__torch_dispatch__(func, types, args, kwargs) x = SubTensor(torch.empty(2)) x.data self.assertEqual(called, 1) x.data = torch.empty(2) self.assertEqual(called, 1) x.data self.assertEqual(called, 2) self.assertIs(type(x), SubTensor) x.set_(torch.empty(2)) self.assertEqual(called, 3) x.data self.assertEqual(called, 4) self.assertIs(type(x), SubTensor)
def test_set_data(self): called = 0 class SubTensor(torch.Tensor): @classmethod def __torch_dispatch__(cls, func, types, args=(), kwargs=None): nonlocal called called += 1 return super().__torch_dispatch__(func, types, args, kwargs) x = SubTensor(torch.empty(2)) x.data self.assertEqual(called, 1) x.data = torch.empty(2) self.assertEqual(called, 1) x.data self.assertEqual(called, 2) self.assertIs(type(x), SubTensor) x.set_(torch.empty(2)) self.assertEqual(called, 3) x.data self.assertEqual(called, 4) self.assertIs(type(x), SubTensor)
import tempfile import torch from copy import deepcopy from torch.library import Library from torch.cuda.jiterator import _create_jit_fn import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_ROCM, IS_WINDOWS from torch.utils._mode_utils import no_dispatch, all_same_mode from torch.testing._internal.logging_tensor import LoggingTensor, LoggingTensorReentrant, LoggingTensorMode, \ log_input, capture_logs, capture_logs_with_logging_tensor_mode from torch.utils._pytree import tree_map, tree_map_only from torch.utils._python_dispatch import TorchDispatchMode, _get_current_dispatch_mode, _get_current_dispatch_mode_stack import logging class TestPythonDispatch(TestCase):
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only class TestPythonDispatch(TestCase): from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_python_dispatch.py
test_is_contiguous_slow_path
def test_is_contiguous_slow_path(self): data = torch.randn(3, 3) contiguous_data = data.clone() not_contiguous_data = torch.as_strided(data.clone(), (2, 2), (1, 2)) for use_wrapper_subclass in [True, False]: class ExampleTensor1(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_sizes_strides_policy="strides") @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): return NotImplemented class ExampleTensor2(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_sizes_strides_policy="strides") @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): if func.overloadpacket == torch.ops.aten.is_contiguous: return contiguous_data.is_contiguous() return NotImplemented class ExampleTensor3(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_sizes_strides_policy="strides") @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): if func.overloadpacket == torch.ops.aten.is_contiguous: return not_contiguous_data.is_contiguous() return NotImplemented err_msg = "no implementation found for 'torch.ops.aten.is_contiguous'" e = ExampleTensor1(torch.randn(3, 3), use_wrapper_subclass) with self.assertRaisesRegex(TypeError, err_msg): e.is_contiguous() with self.assertRaisesRegex(TypeError, err_msg): e.contiguous() e = ExampleTensor2(torch.randn(3, 3), use_wrapper_subclass) self.assertEqual(e.is_contiguous(), True) e.contiguous() # this will just return the original TensorImpl since is_contiguous = True err_msg = "no implementation found for" e = ExampleTensor3(torch.randn(3, 3), use_wrapper_subclass) self.assertEqual(e.is_contiguous(), False) with self.assertRaisesRegex(TypeError, err_msg): e.contiguous()
def test_is_contiguous_slow_path(self): data = torch.randn(3, 3) contiguous_data = data.clone() not_contiguous_data = torch.as_strided(data.clone(), (2, 2), (1, 2)) for use_wrapper_subclass in [True, False]: class ExampleTensor1(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): return TestPythonDispatch.subclass_helper( cls, data, wrapper, dispatch_sizes_strides_policy="strides" ) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): return NotImplemented class ExampleTensor2(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): return TestPythonDispatch.subclass_helper( cls, data, wrapper, dispatch_sizes_strides_policy="strides" ) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): if func.overloadpacket == torch.ops.aten.is_contiguous: return contiguous_data.is_contiguous() return NotImplemented class ExampleTensor3(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): return TestPythonDispatch.subclass_helper( cls, data, wrapper, dispatch_sizes_strides_policy="strides" ) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): if func.overloadpacket == torch.ops.aten.is_contiguous: return not_contiguous_data.is_contiguous() return NotImplemented err_msg = "Multiple dispatch failed for 'torch.ops.aten.is_contiguous'" e = ExampleTensor1(torch.randn(3, 3), use_wrapper_subclass) with self.assertRaisesRegex(TypeError, err_msg): e.is_contiguous() with self.assertRaisesRegex(TypeError, err_msg): e.contiguous() e = ExampleTensor2(torch.randn(3, 3), use_wrapper_subclass) self.assertEqual(e.is_contiguous(), True) e.contiguous() # this will just return the original TensorImpl since is_contiguous = True err_msg = "Multiple dispatch failed for" e = ExampleTensor3(torch.randn(3, 3), use_wrapper_subclass) self.assertEqual(e.is_contiguous(), False) with self.assertRaisesRegex(TypeError, err_msg): e.contiguous()
import tempfile import torch from copy import deepcopy from torch.library import Library from torch.cuda.jiterator import _create_jit_fn import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_ROCM, IS_WINDOWS from torch.utils._mode_utils import no_dispatch, all_same_mode from torch.testing._internal.logging_tensor import LoggingTensor, LoggingTensorReentrant, LoggingTensorMode, \ log_input, capture_logs, capture_logs_with_logging_tensor_mode from torch.utils._pytree import tree_map, tree_map_only from torch.utils._python_dispatch import TorchDispatchMode, _get_current_dispatch_mode, _get_current_dispatch_mode_stack import logging class TestPythonDispatch(TestCase):
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only class TestPythonDispatch(TestCase): from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_python_dispatch.py
test_sym_sizes_strides_slow_path
def test_sym_sizes_strides_slow_path(self): class TestTensor(torch.Tensor): @staticmethod def __new__(cls, *args, **kwargs): r = torch.Tensor._make_wrapper_subclass( # type: ignore[attr-defined] cls, (0,), dispatch_sizes_strides_policy="sizes" ) return r @classmethod def __torch_dispatch__(cls, func, types, args=(), kwargs=None): if func in ( torch.ops.aten.sym_size.default, torch.ops.aten.sym_stride.default, ): from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, ) shape_env = ShapeEnv() si = shape_env.create_symintnode( shape_env.create_symbol( 123, source=ConstantSource("abc"), dynamic_dim=DimDynamic.DUCK, constraint_dim=None, ), hint=123, ) return (si,) t = TestTensor() si = t.size()[0] self.assertIsInstance(si, torch.SymInt) si = t.stride()[0] self.assertIsInstance(si, torch.SymInt)
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only class TestPythonDispatch(TestCase): from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_python_dispatch.py
test_strides_slow_path
def test_strides_slow_path(self): for use_wrapper_subclass in [True, False]: class StridesNotImplemented(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_sizes_strides_policy="strides") @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): return NotImplemented class StridesCustomReturn(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_sizes_strides_policy="strides") @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): if func == torch.ops.aten.sym_stride.default: return (4, 2) return NotImplemented class StridesDefaultReturn(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_sizes_strides_policy="strides") @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): if func == torch.ops.aten.sym_stride.default: return None return NotImplemented err_msg = "no implementation found for 'torch.ops.aten.sym_stride'" e = StridesNotImplemented(torch.randn(3, 3), use_wrapper_subclass) with self.assertRaisesRegex(TypeError, err_msg): e.stride() e = StridesCustomReturn(torch.randn(3, 3), use_wrapper_subclass) self.assertEqual(e.stride(), (4, 2)) e = StridesDefaultReturn(torch.randn(6, 2), use_wrapper_subclass) self.assertEqual(e.stride(), (2, 1))
def test_strides_slow_path(self): for use_wrapper_subclass in [True, False]: class StridesNotImplemented(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): return TestPythonDispatch.subclass_helper( cls, data, wrapper, dispatch_sizes_strides_policy="strides" ) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): return NotImplemented class StridesCustomReturn(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): return TestPythonDispatch.subclass_helper( cls, data, wrapper, dispatch_sizes_strides_policy="strides" ) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): if func == torch.ops.aten.sym_stride.default: return (4, 2) return NotImplemented class StridesDefaultReturn(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): return TestPythonDispatch.subclass_helper( cls, data, wrapper, dispatch_sizes_strides_policy="strides" ) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): if func == torch.ops.aten.sym_stride.default: return None return NotImplemented err_msg = "Multiple dispatch failed for 'torch.ops.aten.sym_stride'" e = StridesNotImplemented(torch.randn(3, 3), use_wrapper_subclass) with self.assertRaisesRegex(TypeError, err_msg): e.stride() e = StridesCustomReturn(torch.randn(3, 3), use_wrapper_subclass) self.assertEqual(e.stride(), (4, 2)) e = StridesDefaultReturn(torch.randn(6, 2), use_wrapper_subclass) self.assertEqual(e.stride(), (2, 1))
import tempfile import torch from copy import deepcopy from torch.library import Library from torch.cuda.jiterator import _create_jit_fn import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_ROCM, IS_WINDOWS from torch.utils._mode_utils import no_dispatch, all_same_mode from torch.testing._internal.logging_tensor import LoggingTensor, LoggingTensorReentrant, LoggingTensorMode, \ log_input, capture_logs, capture_logs_with_logging_tensor_mode from torch.utils._pytree import tree_map, tree_map_only from torch.utils._python_dispatch import TorchDispatchMode, _get_current_dispatch_mode, _get_current_dispatch_mode_stack import logging class TestPythonDispatch(TestCase):
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only class TestPythonDispatch(TestCase): from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_python_dispatch.py
test_sizes_slow_path
def test_sizes_slow_path(self): for use_wrapper_subclass in [True, False]: data = torch.randn(6, 2) class SizesNotImplemented(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_sizes_strides_policy="sizes") @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): if func.overloadpacket == torch.ops.aten.dim: return data.dim() return NotImplemented class SizesCustomReturn(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_sizes_strides_policy="sizes") @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): if func.overloadpacket == torch.ops.aten.dim: return data.dim() if func.overloadpacket == torch.ops.aten.sym_size: return (5, 3) return NotImplemented class SizesDefaultReturn(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_sizes_strides_policy="sizes") @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): if func.overloadpacket == torch.ops.aten.dim: return data.dim() if func.overloadpacket == torch.ops.aten.sym_size: return None return NotImplemented err_msg = "no implementation found for 'torch.ops.aten.sym_size'" e = SizesNotImplemented(torch.randn(3, 3), use_wrapper_subclass) with self.assertRaisesRegex(TypeError, err_msg): e.size() e = SizesCustomReturn(torch.randn(3, 3), use_wrapper_subclass) self.assertEqual(e.size(), (5, 3)) e = SizesDefaultReturn(torch.randn(4, 2), use_wrapper_subclass) self.assertEqual(e.size(), (4, 2))
def test_sizes_slow_path(self): for use_wrapper_subclass in [True, False]: data = torch.randn(6, 2) class SizesNotImplemented(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): return TestPythonDispatch.subclass_helper( cls, data, wrapper, dispatch_sizes_strides_policy="sizes" ) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): if func.overloadpacket == torch.ops.aten.dim: return data.dim() return NotImplemented class SizesCustomReturn(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): return TestPythonDispatch.subclass_helper( cls, data, wrapper, dispatch_sizes_strides_policy="sizes" ) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): if func.overloadpacket == torch.ops.aten.dim: return data.dim() if func.overloadpacket == torch.ops.aten.sym_size: return (5, 3) return NotImplemented class SizesDefaultReturn(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): return TestPythonDispatch.subclass_helper( cls, data, wrapper, dispatch_sizes_strides_policy="sizes" ) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): if func.overloadpacket == torch.ops.aten.dim: return data.dim() if func.overloadpacket == torch.ops.aten.sym_size: return None return NotImplemented err_msg = "Multiple dispatch failed for 'torch.ops.aten.sym_size'" e = SizesNotImplemented(torch.randn(3, 3), use_wrapper_subclass) with self.assertRaisesRegex(TypeError, err_msg): e.size() e = SizesCustomReturn(torch.randn(3, 3), use_wrapper_subclass) self.assertEqual(e.size(), (5, 3)) e = SizesDefaultReturn(torch.randn(4, 2), use_wrapper_subclass) self.assertEqual(e.size(), (4, 2))
import tempfile import torch from copy import deepcopy from torch.library import Library from torch.cuda.jiterator import _create_jit_fn import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_ROCM, IS_WINDOWS from torch.utils._mode_utils import no_dispatch, all_same_mode from torch.testing._internal.logging_tensor import LoggingTensor, LoggingTensorReentrant, LoggingTensorMode, \ log_input, capture_logs, capture_logs_with_logging_tensor_mode from torch.utils._pytree import tree_map, tree_map_only from torch.utils._python_dispatch import TorchDispatchMode, _get_current_dispatch_mode, _get_current_dispatch_mode_stack import logging class TestPythonDispatch(TestCase):
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only class TestPythonDispatch(TestCase): from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_python_dispatch.py
__init__
def __init__(self, msg): return super().__init__(msg)
def __init__(self, msg): super().__init__(msg)
import tempfile import torch from copy import deepcopy from torch.library import Library from torch.cuda.jiterator import _create_jit_fn import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_ROCM, IS_WINDOWS from torch.utils._mode_utils import no_dispatch, all_same_mode from torch.testing._internal.logging_tensor import LoggingTensor, LoggingTensorReentrant, LoggingTensorMode, \ log_input, capture_logs, capture_logs_with_logging_tensor_mode from torch.utils._pytree import tree_map, tree_map_only from torch.utils._python_dispatch import TorchDispatchMode, _get_current_dispatch_mode, _get_current_dispatch_mode_stack import logging class ErrorA(RuntimeError):
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only class ErrorA(RuntimeError): from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_python_dispatch.py
test_device_slowpath
def test_device_slowpath(self): for use_wrapper_subclass in [True]: class ExampleTensor1(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_device=True) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): return NotImplemented class ExampleTensor2(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_device=True) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): if func.overloadpacket == torch.ops.prim.device: return torch.device('meta') return NotImplemented class ExampleTensor3(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_device=True) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): if func.overloadpacket == torch.ops.prim.device: return torch.device('meta') return NotImplemented err_msg = "no implementation found for 'torch.ops.prim.device'" with self.assertRaisesRegex(TypeError, err_msg): e = ExampleTensor1(torch.randn(3, 3), use_wrapper_subclass) e.device() ten = torch.rand([1]) e = ExampleTensor2(torch.randn(3, 3, device='cpu'), use_wrapper_subclass) self.assertEqual(e.device.type, 'meta') self.assertEqual(ten.type_as(e).device.type, 'meta') e = ExampleTensor3(torch.randn(3, 3, device='cpu'), use_wrapper_subclass) self.assertEqual(e.device.type, 'meta') self.assertEqual(ten.type_as(e).device.type, 'meta')
def test_device_slowpath(self): for use_wrapper_subclass in [True]: class ExampleTensor1(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): return TestPythonDispatch.subclass_helper( cls, data, wrapper, dispatch_device=True ) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): return NotImplemented class ExampleTensor2(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): return TestPythonDispatch.subclass_helper( cls, data, wrapper, dispatch_device=True ) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): if func.overloadpacket == torch.ops.prim.device: return torch.device("meta") return NotImplemented class ExampleTensor3(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): return TestPythonDispatch.subclass_helper( cls, data, wrapper, dispatch_device=True ) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): if func.overloadpacket == torch.ops.prim.device: return torch.device("meta") return NotImplemented err_msg = "Multiple dispatch failed for 'torch.ops.prim.device'" with self.assertRaisesRegex(TypeError, err_msg): e = ExampleTensor1(torch.randn(3, 3), use_wrapper_subclass) e.device() ten = torch.rand([1]) e = ExampleTensor2(torch.randn(3, 3, device="cpu"), use_wrapper_subclass) self.assertEqual(e.device.type, "meta") self.assertEqual(ten.type_as(e).device.type, "meta") e = ExampleTensor3(torch.randn(3, 3, device="cpu"), use_wrapper_subclass) self.assertEqual(e.device.type, "meta") self.assertEqual(ten.type_as(e).device.type, "meta")
import tempfile import torch from copy import deepcopy from torch.library import Library from torch.cuda.jiterator import _create_jit_fn import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_ROCM, IS_WINDOWS from torch.utils._mode_utils import no_dispatch, all_same_mode from torch.testing._internal.logging_tensor import LoggingTensor, LoggingTensorReentrant, LoggingTensorMode, \ log_input, capture_logs, capture_logs_with_logging_tensor_mode from torch.utils._pytree import tree_map, tree_map_only from torch.utils._python_dispatch import TorchDispatchMode, _get_current_dispatch_mode, _get_current_dispatch_mode_stack import logging class TestPythonDispatch(TestCase):
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only class TestPythonDispatch(TestCase): from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_python_dispatch.py
test_data_ptr_respects_numel_slow_path
def test_data_ptr_respects_numel_slow_path(self): data = torch.randn(6, 2) class NumelDefaultReturn(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): return TestPythonDispatch.subclass_helper( cls, data, wrapper, dispatch_sizes_strides_policy="sizes" ) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): if func.overloadpacket == torch.ops.aten.dim: return data.dim() if func.overloadpacket == torch.ops.aten.numel: numel_called[0] = True return None return NotImplemented for use_wrapper_subclass in (False, True): numel_called = [False] e = NumelDefaultReturn(torch.randn(2, 2), use_wrapper_subclass) e.data_ptr() self.assertTrue(numel_called[0])
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only class TestPythonDispatch(TestCase): from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_python_dispatch.py
test_layout_slow_path
def test_layout_slow_path(self): for use_wrapper_subclass in [True, False]: data = torch.randn(6, 2) class LayoutNotImplemented(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_layout=True) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): return NotImplemented class LayoutCustomReturn(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_layout=True) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): if func.overloadpacket == torch.ops.prim.layout: return torch.sparse_csr return NotImplemented class LayoutDefaultReturn(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_layout=True) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): if func.overloadpacket == torch.ops.prim.layout: return data.layout return NotImplemented err_msg = "no implementation found for 'torch.ops.prim.layout'" e = LayoutNotImplemented(torch.randn(3, 3), use_wrapper_subclass) with self.assertRaisesRegex(TypeError, err_msg): e.layout e = LayoutCustomReturn(torch.randn(3, 3), use_wrapper_subclass) self.assertEqual(e.layout, torch.sparse_csr) e = LayoutDefaultReturn(torch.randn(4, 2), use_wrapper_subclass) self.assertEqual(e.layout, torch.strided)
def test_layout_slow_path(self): for use_wrapper_subclass in [True, False]: data = torch.randn(6, 2) class LayoutNotImplemented(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): return TestPythonDispatch.subclass_helper( cls, data, wrapper, dispatch_layout=True ) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): return NotImplemented class LayoutCustomReturn(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): return TestPythonDispatch.subclass_helper( cls, data, wrapper, dispatch_layout=True ) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): if func.overloadpacket == torch.ops.prim.layout: return torch.sparse_csr return NotImplemented class LayoutDefaultReturn(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): return TestPythonDispatch.subclass_helper( cls, data, wrapper, dispatch_layout=True ) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): if func.overloadpacket == torch.ops.prim.layout: return data.layout return NotImplemented err_msg = "Multiple dispatch failed for 'torch.ops.prim.layout'" e = LayoutNotImplemented(torch.randn(3, 3), use_wrapper_subclass) with self.assertRaisesRegex(TypeError, err_msg): e.layout e = LayoutCustomReturn(torch.randn(3, 3), use_wrapper_subclass) self.assertEqual(e.layout, torch.sparse_csr) e = LayoutDefaultReturn(torch.randn(4, 2), use_wrapper_subclass) self.assertEqual(e.layout, torch.strided)
import tempfile import torch from copy import deepcopy from torch.library import Library from torch.cuda.jiterator import _create_jit_fn import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_ROCM, IS_WINDOWS from torch.utils._mode_utils import no_dispatch, all_same_mode from torch.testing._internal.logging_tensor import LoggingTensor, LoggingTensorReentrant, LoggingTensorMode, \ log_input, capture_logs, capture_logs_with_logging_tensor_mode from torch.utils._pytree import tree_map, tree_map_only from torch.utils._python_dispatch import TorchDispatchMode, _get_current_dispatch_mode, _get_current_dispatch_mode_stack import logging class TestPythonDispatch(TestCase):
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only class TestPythonDispatch(TestCase): from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_python_dispatch.py
test_wrapper_subclass_aliasing_custom
def test_wrapper_subclass_aliasing_custom(self, device, dtype, op): samples = op.sample_inputs(device, dtype) sample = first_sample(self, samples) args = (sample.input, *sample.args) kwargs = sample.kwargs self._test_wrapper_subclass_aliasing(op, args, kwargs)
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, ) class TestWrapperSubclassAliasing(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_python_dispatch.py
test_wrapper_subclass_aliasing_conv2d
def test_wrapper_subclass_aliasing_conv2d(self, device): args = (torch.randn(4, 4, 4, 4), torch.randn(4, 4, 4, 4)) kwargs = {} # conv2d has a default arg 'int[2] strides=0', # which torchscript expands into 'int[2] strides=[0, 0]' # Make sure that _return_and_correct_aliasing can handle this case # (I'm using inference_mode to make sure conv2d doesn't decompose and goes to torch_dispatch) with torch.inference_mode(): self._test_wrapper_subclass_aliasing( torch.ops.aten.conv2d.default, args, kwargs )
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, ) class TestWrapperSubclassAliasing(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_python_dispatch.py
test_wrapper_subclass_aliasing_out_op
run_tests()
def test_wrapper_subclass_aliasing_out_op(self, device): # Make sure that _return_and_correct_aliasing can handle kwargs w mutable tensors args = (torch.ones(4), torch.ones(4)) kwargs = {"out": torch.empty(4)} self._test_wrapper_subclass_aliasing(torch.ops.aten.add.out, args, kwargs)
import logging import sys import tempfile import unittest from copy import deepcopy import torch import torch._dynamo from torch import SymInt from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.multiprocessing.reductions import StorageWeakRef from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, ops, ) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_utils import ( first_sample, IS_WINDOWS, run_tests, TEST_WITH_ROCM, TestCase, ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.logging_tensor import ( capture_logs, capture_logs_with_logging_tensor_mode, log_input, LoggingTensor, LoggingTensorMode, LoggingTensorReentrant, ) from torch.testing._internal.two_tensor import TwoTensor from torch.utils import _pytree as pytree from torch.utils._mode_utils import all_same_mode, no_dispatch from torch.utils._python_dispatch import ( _get_current_dispatch_mode, _get_current_dispatch_mode_stack, is_in_torch_dispatch_mode, TorchDispatchMode, ) from torch.utils._pytree import tree_map, tree_map_only from torch._dynamo.source import ConstantSource from torch.fx.experimental.symbolic_shapes import ( DimDynamic, ShapeEnv, ) class TestWrapperSubclassAliasing(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_pytree.py
__init__
def __init__(self, x, y): self.x = x self.y = y
import collections import inspect import os import re import subprocess import sys import unittest from collections import defaultdict, deque, namedtuple, OrderedDict, UserDict from dataclasses import dataclass from typing import Any, NamedTuple import torch import torch.utils._pytree as py_pytree from torch.fx.immutable_collections import immutable_dict, immutable_list from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, IS_FBCODE, parametrize, run_tests, skipIfTorchDynamo, subtest, TEST_WITH_TORCHDYNAMO, TestCase, ) import torch.utils._cxx_pytree as cxx_pytree GlobalPoint = namedtuple("GlobalPoint", ["x", "y"]) class GlobalDummyType: import json
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_pytree.py
run_test_with_leaf
def run_test_with_leaf(leaf): values, treespec = tree_flatten(leaf) self.assertEqual(values, [leaf]) self.assertEqual(treespec, LeafSpec()) unflattened = tree_unflatten(values, treespec) self.assertEqual(unflattened, leaf) run_test_with_leaf(1) run_test_with_leaf(1.) run_test_with_leaf(None) run_test_with_leaf(bool) run_test_with_leaf(torch.randn(3, 3))
def run_test_with_leaf(leaf): values, treespec = pytree_impl.tree_flatten(leaf) self.assertEqual(values, [leaf]) self.assertEqual(treespec, pytree_impl.LeafSpec()) unflattened = pytree_impl.tree_unflatten(values, treespec) self.assertEqual(unflattened, leaf) run_test_with_leaf(1) run_test_with_leaf(1.0) run_test_with_leaf(None) run_test_with_leaf(bool) run_test_with_leaf(torch.randn(3, 3))
import torch from torch.testing._internal.common_utils import TestCase, run_tests from torch.utils._pytree import tree_flatten, tree_map, tree_unflatten, TreeSpec, LeafSpec from torch.utils._pytree import _broadcast_to_and_flatten, tree_map_only, tree_all from torch.utils._pytree import tree_any, tree_all_only, tree_any_only from collections import namedtuple, OrderedDict from torch.testing._internal.common_utils import parametrize, subtest, instantiate_parametrized_tests
import collections import inspect import os import re import subprocess import sys import unittest from collections import defaultdict, deque, namedtuple, OrderedDict, UserDict from dataclasses import dataclass from typing import Any, NamedTuple import torch import torch.utils._pytree as py_pytree from torch.fx.immutable_collections import immutable_dict, immutable_list from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, IS_FBCODE, parametrize, run_tests, skipIfTorchDynamo, subtest, TEST_WITH_TORCHDYNAMO, TestCase, ) import torch.utils._cxx_pytree as cxx_pytree GlobalPoint = namedtuple("GlobalPoint", ["x", "y"]) import json
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_pytree.py
run_test
def run_test(lst): expected_spec = TreeSpec(list, None, [LeafSpec() for _ in lst]) values, treespec = tree_flatten(lst) self.assertTrue(isinstance(values, list)) self.assertEqual(values, lst) self.assertEqual(treespec, expected_spec) unflattened = tree_unflatten(values, treespec) self.assertEqual(unflattened, lst) self.assertTrue(isinstance(unflattened, list)) run_test([]) run_test([1., 2]) run_test([torch.tensor([1., 2]), 2, 10, 9, 11])
def run_test(tup): expected_spec = gen_expected_fn(tup) values, treespec = pytree_impl.tree_flatten(tup) self.assertIsInstance(values, list) self.assertEqual(values, list(tup)) self.assertEqual(treespec, expected_spec) unflattened = pytree_impl.tree_unflatten(values, treespec) self.assertEqual(unflattened, tup) self.assertIsInstance(unflattened, tuple) run_test(()) run_test((1.0,)) run_test((1.0, 2)) run_test((torch.tensor([1.0, 2]), 2, 10, 9, 11))
import torch from torch.testing._internal.common_utils import TestCase, run_tests from torch.utils._pytree import tree_flatten, tree_map, tree_unflatten, TreeSpec, LeafSpec from torch.utils._pytree import _broadcast_to_and_flatten, tree_map_only, tree_all from torch.utils._pytree import tree_any, tree_all_only, tree_any_only from collections import namedtuple, OrderedDict from torch.testing._internal.common_utils import parametrize, subtest, instantiate_parametrized_tests
import collections import inspect import os import re import subprocess import sys import unittest from collections import defaultdict, deque, namedtuple, OrderedDict, UserDict from dataclasses import dataclass from typing import Any, NamedTuple import torch import torch.utils._pytree as py_pytree from torch.fx.immutable_collections import immutable_dict, immutable_list from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, IS_FBCODE, parametrize, run_tests, skipIfTorchDynamo, subtest, TEST_WITH_TORCHDYNAMO, TestCase, ) import torch.utils._cxx_pytree as cxx_pytree GlobalPoint = namedtuple("GlobalPoint", ["x", "y"]) import json
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_pytree.py
f
def f(x): return x * 3 sm1 = sum(map(tree_flatten(pytree)[0], f)) sm2 = tree_flatten(tree_map(f, pytree))[0] self.assertEqual(sm1, sm2)
def f(x): return x * 3 sm1 = sum(map(f, pytree_impl.tree_leaves(pytree))) sm2 = sum(pytree_impl.tree_leaves(pytree_impl.tree_map(f, pytree))) self.assertEqual(sm1, sm2)
import torch from torch.testing._internal.common_utils import TestCase, run_tests from torch.utils._pytree import tree_flatten, tree_map, tree_unflatten, TreeSpec, LeafSpec from torch.utils._pytree import _broadcast_to_and_flatten, tree_map_only, tree_all from torch.utils._pytree import tree_any, tree_all_only, tree_any_only from collections import namedtuple, OrderedDict from torch.testing._internal.common_utils import parametrize, subtest, instantiate_parametrized_tests
import collections import inspect import os import re import subprocess import sys import unittest from collections import defaultdict, deque, namedtuple, OrderedDict, UserDict from dataclasses import dataclass from typing import Any, NamedTuple import torch import torch.utils._pytree as py_pytree from torch.fx.immutable_collections import immutable_dict, immutable_list from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, IS_FBCODE, parametrize, run_tests, skipIfTorchDynamo, subtest, TEST_WITH_TORCHDYNAMO, TestCase, ) import torch.utils._cxx_pytree as cxx_pytree GlobalPoint = namedtuple("GlobalPoint", ["x", "y"]) import json
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_pytree.py
invf
def invf(x): return x // 3 self.assertEqual(tree_flatten(tree_flatten(pytree, f), invf), pytree) cases = [ [()], ([],), {'a': ()}, {'a': 1, 'b': [{'c': 2}]}, {'a': 0, 'b': [2, {'c': 3}, 4], 'c': (5, 6)}, ] for case in cases: run_test(case)
def invf(x): return x // 3 self.assertEqual( pytree_impl.tree_map(invf, pytree_impl.tree_map(f, pytree)), pytree, )
import torch from torch.testing._internal.common_utils import TestCase, run_tests from torch.utils._pytree import tree_flatten, tree_map, tree_unflatten, TreeSpec, LeafSpec from torch.utils._pytree import _broadcast_to_and_flatten, tree_map_only, tree_all from torch.utils._pytree import tree_any, tree_all_only, tree_any_only from collections import namedtuple, OrderedDict from torch.testing._internal.common_utils import parametrize, subtest, instantiate_parametrized_tests
import collections import inspect import os import re import subprocess import sys import unittest from collections import defaultdict, deque, namedtuple, OrderedDict, UserDict from dataclasses import dataclass from typing import Any, NamedTuple import torch import torch.utils._pytree as py_pytree from torch.fx.immutable_collections import immutable_dict, immutable_list from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, IS_FBCODE, parametrize, run_tests, skipIfTorchDynamo, subtest, TEST_WITH_TORCHDYNAMO, TestCase, ) import torch.utils._cxx_pytree as cxx_pytree GlobalPoint = namedtuple("GlobalPoint", ["x", "y"]) import json
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_pytree.py
test_tree_only
def test_tree_only(self): self.assertEqual(tree_map_only(int, lambda x: x + 2, [0, "a"]), [2, "a"])
self.assertEqual( pytree_impl.tree_map(f, pytree_x, pytree_y, pytree_z), pytree_impl.tree_map( lambda x: f(x, (x + 1,), {"a": x * 2, "b": 2}), pytree ), ) cases = [ [()], ([],), {"a": ()}, {"a": 1, "b": [{"c": 2}]}, {"a": 0, "b": [2, {"c": 3}, 4], "c": (5, 6)}, ] for case in cases: run_test(case)
import torch
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.utils._pytree import tree_flatten, tree_map, tree_unflatten, TreeSpec, LeafSpec
from torch.utils._pytree import _broadcast_to_and_flatten, tree_map_only, tree_all
from torch.utils._pytree import tree_any, tree_all_only, tree_any_only
from collections import namedtuple, OrderedDict
from torch.testing._internal.common_utils import parametrize, subtest, instantiate_parametrized_tests

class TestPytree(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
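The deleted test reduces to a one-line property of tree_map_only; a minimal sketch using the public API:

import torch.utils._pytree as pytree

# Only int leaves are transformed; the str leaf passes through unchanged.
assert pytree.tree_map_only(int, lambda x: x + 2, [0, "a"]) == [2, "a"]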
torch
test/test_pytree.py
test_pytree_serialize_bad_input
def test_pytree_serialize_bad_input(self, pytree_impl):
    with self.assertRaises(TypeError):
        pytree_impl.treespec_dumps("random_blurb")
import collections
import inspect
import os
import re
import subprocess
import sys
import unittest
from collections import defaultdict, deque, namedtuple, OrderedDict, UserDict
from dataclasses import dataclass
from typing import Any, NamedTuple

import torch
import torch.utils._pytree as py_pytree
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    IS_FBCODE,
    parametrize,
    run_tests,
    skipIfTorchDynamo,
    subtest,
    TEST_WITH_TORCHDYNAMO,
    TestCase,
)

import torch.utils._cxx_pytree as cxx_pytree

GlobalPoint = namedtuple("GlobalPoint", ["x", "y"])

class TestGenericPytree(TestCase):

import json
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_pytree.py
test_deprecated_register_pytree_node
def test_deprecated_register_pytree_node(self):
    class DummyType:
        def __init__(self, x, y):
            self.x = x
            self.y = y

    with self.assertWarnsRegex(
        FutureWarning, "torch.utils._pytree._register_pytree_node"
    ):
        py_pytree._register_pytree_node(
            DummyType,
            lambda dummy: ([dummy.x, dummy.y], None),
            lambda xs, _: DummyType(*xs),
        )

    with self.assertWarnsRegex(UserWarning, "already registered"):
        py_pytree._register_pytree_node(
            DummyType,
            lambda dummy: ([dummy.x, dummy.y], None),
            lambda xs, _: DummyType(*xs),
        )
import collections
import inspect
import os
import re
import subprocess
import sys
import unittest
from collections import defaultdict, deque, namedtuple, OrderedDict, UserDict
from dataclasses import dataclass
from typing import Any, NamedTuple

import torch
import torch.utils._pytree as py_pytree
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    IS_FBCODE,
    parametrize,
    run_tests,
    skipIfTorchDynamo,
    subtest,
    TEST_WITH_TORCHDYNAMO,
    TestCase,
)

import torch.utils._cxx_pytree as cxx_pytree

GlobalPoint = namedtuple("GlobalPoint", ["x", "y"])

class TestPythonPytree(TestCase):

import json
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
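A sketch of the non-deprecated registration path that the warning above points to, assuming only the public register_pytree_node API; the Point class here is hypothetical.

import torch.utils._pytree as pytree

class Point:  # hypothetical container type
    def __init__(self, x, y):
        self.x = x
        self.y = y

# flatten_fn returns (children, context); unflatten_fn inverts it.
pytree.register_pytree_node(
    Point,
    lambda p: ([p.x, p.y], None),
    lambda children, _: Point(*children),
)

leaves, spec = pytree.tree_flatten(Point(1, 2))
assert leaves == [1, 2]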
torch
test/test_pytree.py
__init__
def __init__(self, x, y):
    self.x = x
    self.y = y
import collections
import inspect
import os
import re
import subprocess
import sys
import unittest
from collections import defaultdict, deque, namedtuple, OrderedDict, UserDict
from dataclasses import dataclass
from typing import Any, NamedTuple

import torch
import torch.utils._pytree as py_pytree
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    IS_FBCODE,
    parametrize,
    run_tests,
    skipIfTorchDynamo,
    subtest,
    TEST_WITH_TORCHDYNAMO,
    TestCase,
)

import torch.utils._cxx_pytree as cxx_pytree

GlobalPoint = namedtuple("GlobalPoint", ["x", "y"])

class GlobalDummyType:

import json
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_pytree.py
test_pytree_serialize
def test_pytree_serialize(self, spec):
    # Ensure that the spec is valid
    self.assertEqual(
        spec,
        py_pytree.tree_structure(
            py_pytree.tree_unflatten([0] * spec.num_leaves, spec)
        ),
    )

    serialized_spec = py_pytree.treespec_dumps(spec)
    self.assertIsInstance(serialized_spec, str)
    self.assertEqual(spec, py_pytree.treespec_loads(serialized_spec))
import collections
import inspect
import os
import re
import subprocess
import sys
import unittest
from collections import defaultdict, deque, namedtuple, OrderedDict, UserDict
from dataclasses import dataclass
from typing import Any, NamedTuple

import torch
import torch.utils._pytree as py_pytree
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    IS_FBCODE,
    parametrize,
    run_tests,
    skipIfTorchDynamo,
    subtest,
    TEST_WITH_TORCHDYNAMO,
    TestCase,
)

import torch.utils._cxx_pytree as cxx_pytree

GlobalPoint = namedtuple("GlobalPoint", ["x", "y"])

class TestPythonPytree(TestCase):

import json
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
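A minimal sketch of the serialize/deserialize roundtrip for specs built from builtin containers, using only APIs shown above; the sample tree is illustrative.

import torch.utils._pytree as py_pytree

spec = py_pytree.tree_structure({"a": 1, "b": (2, 3)})  # illustrative spec
serialized = py_pytree.treespec_dumps(spec)  # JSON string
assert isinstance(serialized, str)
assert py_pytree.treespec_loads(serialized) == spec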
torch
test/test_pytree.py
test_pytree_serialize_namedtuple
def test_pytree_serialize_namedtuple(self):
    Point1 = namedtuple("Point1", ["x", "y"])
    py_pytree._register_namedtuple(
        Point1,
        serialized_type_name="test_pytree.test_pytree_serialize_namedtuple.Point1",
    )

    spec = py_pytree.TreeSpec(
        namedtuple, Point1, [py_pytree.LeafSpec(), py_pytree.LeafSpec()]
    )
    roundtrip_spec = py_pytree.treespec_loads(py_pytree.treespec_dumps(spec))
    self.assertEqual(spec, roundtrip_spec)

    class Point2(NamedTuple):
        x: int
        y: int

    py_pytree._register_namedtuple(
        Point2,
        serialized_type_name="test_pytree.test_pytree_serialize_namedtuple.Point2",
    )

    spec = py_pytree.TreeSpec(
        namedtuple, Point2, [py_pytree.LeafSpec(), py_pytree.LeafSpec()]
    )
    roundtrip_spec = py_pytree.treespec_loads(py_pytree.treespec_dumps(spec))
    self.assertEqual(spec, roundtrip_spec)
import collections
import inspect
import os
import re
import subprocess
import sys
import unittest
from collections import defaultdict, deque, namedtuple, OrderedDict, UserDict
from dataclasses import dataclass
from typing import Any, NamedTuple

import torch
import torch.utils._pytree as py_pytree
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    IS_FBCODE,
    parametrize,
    run_tests,
    skipIfTorchDynamo,
    subtest,
    TEST_WITH_TORCHDYNAMO,
    TestCase,
)

import torch.utils._cxx_pytree as cxx_pytree

GlobalPoint = namedtuple("GlobalPoint", ["x", "y"])

class TestPythonPytree(TestCase):

import json
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
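A sketch of the namedtuple path in isolation; note that _register_namedtuple is a private helper and the serialized name below is hypothetical.

from collections import namedtuple
import torch.utils._pytree as py_pytree

Point = namedtuple("Point", ["x", "y"])
# Namedtuples must be given an explicit serialized name before their specs
# can be dumped; otherwise treespec_dumps raises NotImplementedError.
py_pytree._register_namedtuple(
    Point, serialized_type_name="example.Point"  # hypothetical name
)
spec = py_pytree.tree_structure(Point(1, 2))
assert py_pytree.treespec_loads(py_pytree.treespec_dumps(spec)) == spec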
torch
test/test_pytree.py
test_pytree_serialize_namedtuple_bad
def test_pytree_serialize_namedtuple_bad(self):
    DummyType = namedtuple("DummyType", ["x", "y"])

    spec = py_pytree.TreeSpec(
        namedtuple, DummyType, [py_pytree.LeafSpec(), py_pytree.LeafSpec()]
    )

    with self.assertRaisesRegex(
        NotImplementedError, "Please register using `_register_namedtuple`"
    ):
        py_pytree.treespec_dumps(spec)
import collections
import inspect
import os
import re
import subprocess
import sys
import unittest
from collections import defaultdict, deque, namedtuple, OrderedDict, UserDict
from dataclasses import dataclass
from typing import Any, NamedTuple

import torch
import torch.utils._pytree as py_pytree
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    IS_FBCODE,
    parametrize,
    run_tests,
    skipIfTorchDynamo,
    subtest,
    TEST_WITH_TORCHDYNAMO,
    TestCase,
)

import torch.utils._cxx_pytree as cxx_pytree

GlobalPoint = namedtuple("GlobalPoint", ["x", "y"])

class TestPythonPytree(TestCase):

import json
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_pytree.py
test_pytree_custom_type_serialize_bad
def test_pytree_custom_type_serialize_bad(self):
    class DummyType:
        def __init__(self, x, y):
            self.x = x
            self.y = y

    py_pytree.register_pytree_node(
        DummyType,
        lambda dummy: ([dummy.x, dummy.y], None),
        lambda xs, _: DummyType(*xs),
    )

    spec = py_pytree.TreeSpec(
        DummyType, None, [py_pytree.LeafSpec(), py_pytree.LeafSpec()]
    )
    with self.assertRaisesRegex(
        NotImplementedError, "No registered serialization name"
    ):
        roundtrip_spec = py_pytree.treespec_dumps(spec)
import collections
import inspect
import os
import re
import subprocess
import sys
import unittest
from collections import defaultdict, deque, namedtuple, OrderedDict, UserDict
from dataclasses import dataclass
from typing import Any, NamedTuple

import torch
import torch.utils._pytree as py_pytree
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    IS_FBCODE,
    parametrize,
    run_tests,
    skipIfTorchDynamo,
    subtest,
    TEST_WITH_TORCHDYNAMO,
    TestCase,
)

import torch.utils._cxx_pytree as cxx_pytree

GlobalPoint = namedtuple("GlobalPoint", ["x", "y"])

class TestPythonPytree(TestCase):

import json
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_pytree.py
__init__
def __init__(self, x, y):
    self.x = x
    self.y = y
import collections
import inspect
import os
import re
import subprocess
import sys
import unittest
from collections import defaultdict, deque, namedtuple, OrderedDict, UserDict
from dataclasses import dataclass
from typing import Any, NamedTuple

import torch
import torch.utils._pytree as py_pytree
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    IS_FBCODE,
    parametrize,
    run_tests,
    skipIfTorchDynamo,
    subtest,
    TEST_WITH_TORCHDYNAMO,
    TestCase,
)

import torch.utils._cxx_pytree as cxx_pytree

GlobalPoint = namedtuple("GlobalPoint", ["x", "y"])

class GlobalDummyType:

import json
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_pytree.py
test_pytree_custom_type_serialize
def test_pytree_custom_type_serialize(self):
    class DummyType:
        def __init__(self, x, y):
            self.x = x
            self.y = y

    py_pytree.register_pytree_node(
        DummyType,
        lambda dummy: ([dummy.x, dummy.y], None),
        lambda xs, _: DummyType(*xs),
        serialized_type_name="test_pytree_custom_type_serialize.DummyType",
        to_dumpable_context=lambda context: "moo",
        from_dumpable_context=lambda dumpable_context: None,
    )
    spec = py_pytree.TreeSpec(
        DummyType, None, [py_pytree.LeafSpec(), py_pytree.LeafSpec()]
    )
    serialized_spec = py_pytree.treespec_dumps(spec, 1)
    self.assertIn("moo", serialized_spec)
    roundtrip_spec = py_pytree.treespec_loads(serialized_spec)
    self.assertEqual(roundtrip_spec, spec)
import collections
import inspect
import os
import re
import subprocess
import sys
import unittest
from collections import defaultdict, deque, namedtuple, OrderedDict, UserDict
from dataclasses import dataclass
from typing import Any, NamedTuple

import torch
import torch.utils._pytree as py_pytree
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    IS_FBCODE,
    parametrize,
    run_tests,
    skipIfTorchDynamo,
    subtest,
    TEST_WITH_TORCHDYNAMO,
    TestCase,
)

import torch.utils._cxx_pytree as cxx_pytree

GlobalPoint = namedtuple("GlobalPoint", ["x", "y"])

class TestPythonPytree(TestCase):

import json
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
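A sketch of custom-context serialization with identity converters, under the assumption that a JSON-friendly context roundtrips unchanged; the Box type and its serialized name are hypothetical.

import torch.utils._pytree as py_pytree

class Box:  # hypothetical type with a JSON-friendly context
    def __init__(self, value):
        self.value = value

py_pytree.register_pytree_node(
    Box,
    lambda b: ([b.value], "box-context"),
    lambda xs, ctx: Box(xs[0]),
    serialized_type_name="example.Box",              # hypothetical name
    to_dumpable_context=lambda ctx: ctx,             # context is already JSON-safe
    from_dumpable_context=lambda dumpable: dumpable,
)

spec = py_pytree.tree_structure(Box(7))
serialized = py_pytree.treespec_dumps(spec, 1)       # protocol 1, as in the test
assert "box-context" in serialized
assert py_pytree.treespec_loads(serialized) == spec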
torch
test/test_pytree.py
__init__
def __init__(self, x, y):
    self.x = x
    self.y = y
import collections
import inspect
import os
import re
import subprocess
import sys
import unittest
from collections import defaultdict, deque, namedtuple, OrderedDict, UserDict
from dataclasses import dataclass
from typing import Any, NamedTuple

import torch
import torch.utils._pytree as py_pytree
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    IS_FBCODE,
    parametrize,
    run_tests,
    skipIfTorchDynamo,
    subtest,
    TEST_WITH_TORCHDYNAMO,
    TestCase,
)

import torch.utils._cxx_pytree as cxx_pytree

GlobalPoint = namedtuple("GlobalPoint", ["x", "y"])

class GlobalDummyType:

import json
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_pytree.py
test_pytree_serialize_register_bad
def test_pytree_serialize_register_bad(self):
    class DummyType:
        def __init__(self, x, y):
            self.x = x
            self.y = y

    with self.assertRaisesRegex(
        ValueError, "Both to_dumpable_context and from_dumpable_context"
    ):
        py_pytree.register_pytree_node(
            DummyType,
            lambda dummy: ([dummy.x, dummy.y], None),
            lambda xs, _: DummyType(*xs),
            serialized_type_name="test_pytree_serialize_register_bad.DummyType",
            to_dumpable_context=lambda context: "moo",
        )
import collections
import inspect
import os
import re
import subprocess
import sys
import unittest
from collections import defaultdict, deque, namedtuple, OrderedDict, UserDict
from dataclasses import dataclass
from typing import Any, NamedTuple

import torch
import torch.utils._pytree as py_pytree
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    IS_FBCODE,
    parametrize,
    run_tests,
    skipIfTorchDynamo,
    subtest,
    TEST_WITH_TORCHDYNAMO,
    TestCase,
)

import torch.utils._cxx_pytree as cxx_pytree

GlobalPoint = namedtuple("GlobalPoint", ["x", "y"])

class TestPythonPytree(TestCase):

import json
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_pytree.py
__init__
def __init__(self, x, y):
    self.x = x
    self.y = y
import collections
import inspect
import os
import re
import subprocess
import sys
import unittest
from collections import defaultdict, deque, namedtuple, OrderedDict, UserDict
from dataclasses import dataclass
from typing import Any, NamedTuple

import torch
import torch.utils._pytree as py_pytree
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    IS_FBCODE,
    parametrize,
    run_tests,
    skipIfTorchDynamo,
    subtest,
    TEST_WITH_TORCHDYNAMO,
    TestCase,
)

import torch.utils._cxx_pytree as cxx_pytree

GlobalPoint = namedtuple("GlobalPoint", ["x", "y"])

class GlobalDummyType:

import json
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_pytree.py
test_pytree_context_serialize_bad
def test_pytree_context_serialize_bad(self):
    class DummyType:
        def __init__(self, x, y):
            self.x = x
            self.y = y

    py_pytree.register_pytree_node(
        DummyType,
        lambda dummy: ([dummy.x, dummy.y], None),
        lambda xs, _: DummyType(*xs),
        serialized_type_name="test_pytree_serialize_serialize_bad.DummyType",
        to_dumpable_context=lambda context: DummyType,
        from_dumpable_context=lambda dumpable_context: None,
    )

    spec = py_pytree.TreeSpec(
        DummyType, None, [py_pytree.LeafSpec(), py_pytree.LeafSpec()]
    )

    with self.assertRaisesRegex(
        TypeError, "Object of type type is not JSON serializable"
    ):
        py_pytree.treespec_dumps(spec)
import collections
import inspect
import os
import re
import subprocess
import sys
import unittest
from collections import defaultdict, deque, namedtuple, OrderedDict, UserDict
from dataclasses import dataclass
from typing import Any, NamedTuple

import torch
import torch.utils._pytree as py_pytree
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    IS_FBCODE,
    parametrize,
    run_tests,
    skipIfTorchDynamo,
    subtest,
    TEST_WITH_TORCHDYNAMO,
    TestCase,
)

import torch.utils._cxx_pytree as cxx_pytree

GlobalPoint = namedtuple("GlobalPoint", ["x", "y"])

class TestPythonPytree(TestCase):

import json
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_pytree.py
__init__
def __init__(self, x, y):
    self.x = x
    self.y = y
import collections
import inspect
import os
import re
import subprocess
import sys
import unittest
from collections import defaultdict, deque, namedtuple, OrderedDict, UserDict
from dataclasses import dataclass
from typing import Any, NamedTuple

import torch
import torch.utils._pytree as py_pytree
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    IS_FBCODE,
    parametrize,
    run_tests,
    skipIfTorchDynamo,
    subtest,
    TEST_WITH_TORCHDYNAMO,
    TestCase,
)

import torch.utils._cxx_pytree as cxx_pytree

GlobalPoint = namedtuple("GlobalPoint", ["x", "y"])

class GlobalDummyType:

import json
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_pytree.py
test_pytree_serialize_bad_protocol
def test_pytree_serialize_bad_protocol(self):
    import json

    Point = namedtuple("Point", ["x", "y"])
    spec = py_pytree.TreeSpec(
        namedtuple, Point, [py_pytree.LeafSpec(), py_pytree.LeafSpec()]
    )
    py_pytree._register_namedtuple(
        Point,
        serialized_type_name="test_pytree.test_pytree_serialize_bad_protocol.Point",
    )

    with self.assertRaisesRegex(ValueError, "Unknown protocol"):
        py_pytree.treespec_dumps(spec, -1)

    serialized_spec = py_pytree.treespec_dumps(spec)
    protocol, data = json.loads(serialized_spec)
    bad_protocol_serialized_spec = json.dumps((-1, data))

    with self.assertRaisesRegex(ValueError, "Unknown protocol"):
        py_pytree.treespec_loads(bad_protocol_serialized_spec)
import collections
import inspect
import os
import re
import subprocess
import sys
import unittest
from collections import defaultdict, deque, namedtuple, OrderedDict, UserDict
from dataclasses import dataclass
from typing import Any, NamedTuple

import torch
import torch.utils._pytree as py_pytree
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    IS_FBCODE,
    parametrize,
    run_tests,
    skipIfTorchDynamo,
    subtest,
    TEST_WITH_TORCHDYNAMO,
    TestCase,
)

import torch.utils._cxx_pytree as cxx_pytree

GlobalPoint = namedtuple("GlobalPoint", ["x", "y"])

class TestPythonPytree(TestCase):

import json
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
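As the test above relies on, the serialized form is a JSON-encoded (protocol, payload) pair; a minimal sketch inspecting it, with an illustrative spec:

import json
import torch.utils._pytree as py_pytree

spec = py_pytree.tree_structure([1, 2])  # illustrative spec
protocol, payload = json.loads(py_pytree.treespec_dumps(spec))
assert protocol == 1  # current default protocol, per test_saved_serialized below
# Splicing an unsupported protocol number back in makes treespec_loads reject it.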
torch
test/test_pytree.py
test_saved_serialized
def test_saved_serialized(self):
    # py_pytree.tree_structure(OrderedDict([(1, (0, 1)), (2, 2), (3, {4: 3, 5: 4, 6: 5})]))
    complicated_spec = py_pytree.TreeSpec(
        OrderedDict,
        [1, 2, 3],
        [
            py_pytree.TreeSpec(
                tuple, None, [py_pytree.LeafSpec(), py_pytree.LeafSpec()]
            ),
            py_pytree.LeafSpec(),
            py_pytree.TreeSpec(
                dict,
                [4, 5, 6],
                [
                    py_pytree.LeafSpec(),
                    py_pytree.LeafSpec(),
                    py_pytree.LeafSpec(),
                ],
            ),
        ],
    )
    # Ensure that the spec is valid
    self.assertEqual(
        complicated_spec,
        py_pytree.tree_structure(
            py_pytree.tree_unflatten(
                [0] * complicated_spec.num_leaves, complicated_spec
            )
        ),
    )

    serialized_spec = py_pytree.treespec_dumps(complicated_spec)
    saved_spec = (
        '[1, {"type": "collections.OrderedDict", "context": "[1, 2, 3]", '
        '"children_spec": [{"type": "builtins.tuple", "context": "null", '
        '"children_spec": [{"type": null, "context": null, '
        '"children_spec": []}, {"type": null, "context": null, '
        '"children_spec": []}]}, {"type": null, "context": null, '
        '"children_spec": []}, {"type": "builtins.dict", "context": '
        '"[4, 5, 6]", "children_spec": [{"type": null, "context": null, '
        '"children_spec": []}, {"type": null, "context": null, "children_spec": '
        '[]}, {"type": null, "context": null, "children_spec": []}]}]}]'
    )
    self.assertEqual(serialized_spec, saved_spec)
    self.assertEqual(complicated_spec, py_pytree.treespec_loads(saved_spec))
import collections
import inspect
import os
import re
import subprocess
import sys
import unittest
from collections import defaultdict, deque, namedtuple, OrderedDict, UserDict
from dataclasses import dataclass
from typing import Any, NamedTuple

import torch
import torch.utils._pytree as py_pytree
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    IS_FBCODE,
    parametrize,
    run_tests,
    skipIfTorchDynamo,
    subtest,
    TEST_WITH_TORCHDYNAMO,
    TestCase,
)

import torch.utils._cxx_pytree as cxx_pytree

GlobalPoint = namedtuple("GlobalPoint", ["x", "y"])

class TestPythonPytree(TestCase):

import json
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_pytree.py
test_tree_map_with_path
def test_tree_map_with_path(self):
    tree = [{i: i for i in range(10)}]
    all_zeros = py_pytree.tree_map_with_path(
        lambda kp, val: val - kp[1].key + kp[0].idx, tree
    )
    self.assertEqual(all_zeros, [dict.fromkeys(range(10), 0)])
import collections
import inspect
import os
import re
import subprocess
import sys
import unittest
from collections import defaultdict, deque, namedtuple, OrderedDict, UserDict
from dataclasses import dataclass
from typing import Any, NamedTuple

import torch
import torch.utils._pytree as py_pytree
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    IS_FBCODE,
    parametrize,
    run_tests,
    skipIfTorchDynamo,
    subtest,
    TEST_WITH_TORCHDYNAMO,
    TestCase,
)

import torch.utils._cxx_pytree as cxx_pytree

GlobalPoint = namedtuple("GlobalPoint", ["x", "y"])

class TestPythonPytree(TestCase):

import json
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
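A sketch of the key paths in tree_map_with_path: each leaf is visited with its access path, so kp[0].idx indexes the outer list and kp[1].key indexes the dict; the small tree below is illustrative.

import torch.utils._pytree as py_pytree

tree = [{i: i for i in range(3)}]
zeros = py_pytree.tree_map_with_path(
    lambda kp, val: val - kp[1].key + kp[0].idx, tree
)
assert zeros == [{0: 0, 1: 0, 2: 0}]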
torch
test/test_pytree.py
test_tree_flatten_with_path_is_leaf
def test_tree_flatten_with_path_is_leaf(self):
    leaf_dict = {"foo": [(3)]}
    pytree = (["hello", [1, 2], leaf_dict],)
    key_leaves, spec = py_pytree.tree_flatten_with_path(
        pytree, is_leaf=lambda x: isinstance(x, dict)
    )
    self.assertTrue(key_leaves[-1][1] is leaf_dict)
import collections
import inspect
import os
import re
import subprocess
import sys
import unittest
from collections import defaultdict, deque, namedtuple, OrderedDict, UserDict
from dataclasses import dataclass
from typing import Any, NamedTuple

import torch
import torch.utils._pytree as py_pytree
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    IS_FBCODE,
    parametrize,
    run_tests,
    skipIfTorchDynamo,
    subtest,
    TEST_WITH_TORCHDYNAMO,
    TestCase,
)

import torch.utils._cxx_pytree as cxx_pytree

GlobalPoint = namedtuple("GlobalPoint", ["x", "y"])

class TestPythonPytree(TestCase):

import json
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
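A sketch of is_leaf short-circuiting: the matching subtree is returned as a single leaf, identity preserved, instead of being flattened further; the nested structure is illustrative.

import torch.utils._pytree as py_pytree

leaf_dict = {"foo": [3]}  # illustrative subtree to freeze
nested = (["hello", [1, 2], leaf_dict],)
key_leaves, spec = py_pytree.tree_flatten_with_path(
    nested, is_leaf=lambda x: isinstance(x, dict)
)
assert key_leaves[-1][1] is leaf_dict  # same object, not a copy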
torch
test/test_pytree.py
test_tree_flatten_with_path_roundtrip
def test_tree_flatten_with_path_roundtrip(self):
    class ANamedTuple(NamedTuple):
        x: torch.Tensor
        y: int
        z: str

    @dataclass
    class ACustomPytree:
        x: Any
        y: Any
        z: Any

    py_pytree.register_pytree_node(
        ACustomPytree,
        flatten_fn=lambda f: ([f.x, f.y], f.z),
        unflatten_fn=lambda xy, z: ACustomPytree(xy[0], xy[1], z),
        flatten_with_keys_fn=lambda f: ((("x", f.x), ("y", f.y)), f.z),
    )

    SOME_PYTREES = [
        (None,),
        ["hello", [1, 2], {"foo": [(3)]}],
        [ANamedTuple(x=torch.rand(2, 3), y=1, z="foo")],
        [ACustomPytree(x=12, y={"cin": [1, 4, 10], "bar": 18}, z="leaf"), 5],
    ]
    for pytree in SOME_PYTREES:
        key_leaves, spec = py_pytree.tree_flatten_with_path(pytree)
        actual = py_pytree.tree_unflatten([leaf for _, leaf in key_leaves], spec)
        self.assertEqual(actual, pytree)
import collections
import inspect
import os
import re
import subprocess
import sys
import unittest
from collections import defaultdict, deque, namedtuple, OrderedDict, UserDict
from dataclasses import dataclass
from typing import Any, NamedTuple

import torch
import torch.utils._pytree as py_pytree
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    IS_FBCODE,
    parametrize,
    run_tests,
    skipIfTorchDynamo,
    subtest,
    TEST_WITH_TORCHDYNAMO,
    TestCase,
)

import torch.utils._cxx_pytree as cxx_pytree

GlobalPoint = namedtuple("GlobalPoint", ["x", "y"])

class TestPythonPytree(TestCase):

import json
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
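Finally, a minimal sketch of the roundtrip the last test checks, restricted to builtin containers: drop the key paths and unflatten with the returned spec.

import torch.utils._pytree as py_pytree

sample = ["hello", [1, 2], {"foo": [3]}]  # illustrative pytree
key_leaves, spec = py_pytree.tree_flatten_with_path(sample)
leaves = [leaf for _, leaf in key_leaves]
assert py_pytree.tree_unflatten(leaves, spec) == sample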