library
stringclasses
1 value
test_file
stringclasses
785 values
test_function
stringlengths
1
295
before
stringlengths
0
448k
after
stringlengths
0
487k
context_before
stringclasses
947 values
context_after
stringlengths
0
16.3k
commit_before
stringclasses
1 value
commit_after
stringclasses
1 value
change_type
stringclasses
3 values
torch
test/test_overrides.py
_simple_type_parser
if func in annotated_args: for arg in annotated_args[func]: # Guess valid input to aten function based on type of argument t = arg['simple_type'] if t.endswith('?'): t = t[:-1] if t == 'Tensor': if is_method and arg['name'] == 'self': # See "Note: properties and __get__" func = func.__get__(instance_gen()) continue func_args.append(instance_gen()) elif t == 'TensorList' or t == 'ITensorListRef': func_args.append([instance_gen(), instance_gen()]) elif t == 'c10::List<c10::optional<Tensor>>': func_args.append([instance_gen(), instance_gen()]) elif t == 'IntArrayRef' or t == 'SymIntArrayRef': size = arg.get('size', 2) if size == 1: func_args.append(1) else: func_args.append([1] * size) elif t == 'Scalar': func_args.append(3.5) elif t == 'bool': func_args.append(False) elif t == 'Dimname': func_args.append("") elif t == 'DimnameList': func_args.append([""]) elif t.startswith('int'): func_args.append(0) elif t in {'Stream'}: func_args.append(torch.Stream()) elif t.startswith('float') or t == 'double': func_args.append(1.0) elif t in {'Generator', 'MemoryFormat', 'TensorOptions'}: func_args.append(None) elif t == 'ScalarType': func_args.append(torch.float32) elif t == 'c10::string_view': func_args.append('') elif t == 'SymInt': # TODO: generate actual SymbolicInt func_args.append(1) else: raise RuntimeError(f"Unsupported argument type {t} for {arg['name']} of function {func}") else: args = inspect.getfullargspec(override) try: func_args = inspect.getfullargspec(func) # Remove annotations from argspec func_args = type(func_args)(**{**func_args, 'annotations': None}) if func_args != args: raise RuntimeError(f"Override for {func} doesn't match its argspec.\n" + f"Original: {inspect.signature(func)}\n" + f"Override: {inspect.signature(override)}") except TypeError: pass nargs = len(args.args) if args.defaults is not None: nargs -= len(args.defaults) func_args = [instance_gen() for _ in range(nargs)] if args.varargs is not None: func_args += [instance_gen(), instance_gen()]
def _simple_type_parser(func, arg_name, arg_type): # Guess valid input to aten function based on type of argument if arg_type == "Tensor": return instance_gen() elif arg_type == "TensorList" or arg_type == "ITensorListRef": return [instance_gen(), instance_gen()] elif arg_type == "c10::List<::std::optional<Tensor>>": return [instance_gen(), instance_gen()] elif arg_type == "IntArrayRef" or arg_type == "SymIntArrayRef": size = arg.get("size", 2) if size == 1: return 1 else: return [1] * size elif arg_type == "Scalar": return 3.5 elif arg_type == "bool": return False elif arg_type == "Dimname": return "" elif arg_type == "DimnameList": return [""] elif arg_type.startswith("int"): return 0 elif arg_type in {"Stream"}: return torch.Stream() elif arg_type.startswith("float") or arg_type == "double": return 1.0 elif arg_type in {"Generator", "MemoryFormat", "TensorOptions"}: return None elif arg_type == "ScalarType": return torch.float32 elif arg_type == "c10::string_view": return "" elif arg_type == "SymInt": # TODO: generate actual SymbolicInt return 1 else: raise RuntimeError( f"Unsupported argument type {arg_type} for {arg_name} of function {func}" ) if func in annotated_args: for arg in annotated_args[func]: # Guess valid input to aten function based on type of argument t = arg["simple_type"] if t.endswith("?"): t = t[:-1] if t == "Tensor" and is_method and arg["name"] == "self": # See "Note: properties and __get__" func = func.__get__(instance_gen()) continue arg_to_add = _simple_type_parser(func, arg["name"], t) if "is_kwarg_only" in arg and arg["is_kwarg_only"] == str(True): kwargs[arg["name"]] = arg_to_add else: func_args.append(arg_to_add) else: args = inspect.getfullargspec(override) try: func_args = inspect.getfullargspec(func) # Remove annotations from argspec func_args = type(func_args)(**{**func_args, 'annotations': None}) if func_args != args: raise RuntimeError(f"Override for {func} doesn't match its argspec.\n" + f"Original: {inspect.signature(func)}\n" + f"Override: {inspect.signature(override)}") except TypeError: pass nargs = len(args.args) if args.defaults is not None: nargs -= len(args.defaults) func_args = [instance_gen() for _ in range(nargs)] if args.varargs is not None: func_args += [instance_gen(), instance_gen()]
import torch import numpy as np import inspect import functools import pprint import pickle import collections import unittest import contextlib from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_CROSSREF, TEST_WITH_TORCHDYNAMO from torch.overrides import ( handle_torch_function, has_torch_function, get_ignored_functions, get_overridable_functions, get_testing_overrides, resolve_name, is_tensor_method_or_property, TorchFunctionMode, _get_current_function_mode, _get_current_function_mode_stack, BaseTorchFunctionMode ) from torch.utils._mode_utils import all_same_mode from torch.utils._pytree import tree_map Tensor = torch.Tensor HANDLED_FUNCTIONS_DIAGONAL = {} HANDLED_FUNCTIONS_SUB = {} HANDLED_FUNCTIONS_SUB_DIAGONAL = {} HANDLED_FUNCTIONS_TENSOR_LIKE = {} WRAPPED_TRIGGERED_IMPLS = {} from torch.testing._internal.generated.annotated_fn_args import annotated_args from torch.testing._internal.common_utils import gradcheck, gradgradcheck from torch.distributions.utils import broadcast_all from torch._C import _is_torch_function_all_disabled from torch._C import _len_torch_function_stack from torch.utils._device import DeviceContext
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_overrides.py
test_gradcheck
def test_gradcheck(self): from torch.testing._internal.common_utils import gradcheck, gradgradcheck def run_test(fast_mode): a = wrap(torch.tensor(5.0, dtype=torch.double)) b = wrap(torch.tensor(6.0, dtype=torch.double)) a.requires_grad = True b.requires_grad = True gradcheck(torch.add, (a, b), raise_exception=False, check_batched_grad=False, fast_mode=fast_mode) gradgradcheck(torch.add, (a, b), raise_exception=False, check_batched_grad=False, fast_mode=fast_mode) total_used_attrs = a.used_attrs.union(b.used_attrs) total_used_calls = a.used_calls.union(b.used_calls) # These attributes (and the functions below) may change # if the gradcheck implementation changes. It's best to # aim for attributes that may be commonly present on other # Tensor-likes. expected_used_attrs = { 'data', 'dtype', 'is_floating_point', 'is_sparse', 'layout', 'new_zeros', 'numel', 'requires_grad', 'requires_grad_', 'retain_grad', 'size', 'stride', } if fast_mode: expected_used_attrs.add('is_complex') expected_used_attrs.add('device') self.assertEqual(expected_used_attrs, total_used_attrs) expected_used_calls = { torch.Tensor.new_zeros, torch.Tensor.size, torch.Tensor.is_floating_point, torch.Tensor.numel, torch.Tensor.retain_grad, torch.Tensor.stride, torch.Tensor.requires_grad_, torch.autograd.grad, torch.add, } if fast_mode: expected_used_calls.add(torch.Tensor.is_complex) self.assertEqual(expected_used_calls, total_used_calls) run_test(fast_mode=True) run_test(fast_mode=False)
def test_gradcheck(self): from torch.testing._internal.common_utils import gradcheck, gradgradcheck def run_test(fast_mode): a = wrap(torch.tensor(5.0, dtype=torch.double)) b = wrap(torch.tensor(6.0, dtype=torch.double)) a.requires_grad = True b.requires_grad = True gradcheck(torch.add, (a, b), raise_exception=False, check_batched_grad=False, fast_mode=fast_mode) gradgradcheck(torch.add, (a, b), raise_exception=False, check_batched_grad=False, fast_mode=fast_mode) total_used_attrs = a.used_attrs.union(b.used_attrs) total_used_calls = a.used_calls.union(b.used_calls) # These attributes (and the functions below) may change # if the gradcheck implementation changes. It's best to # aim for attributes that may be commonly present on other # Tensor-likes. expected_used_attrs = { 'data', 'dtype', 'is_floating_point', 'is_sparse', 'layout', 'new_zeros', 'numel', 'requires_grad', 'requires_grad_', 'size', 'stride', } if fast_mode: expected_used_attrs.add('is_complex') expected_used_attrs.add('device') self.assertEqual(expected_used_attrs, total_used_attrs) expected_used_calls = { torch.Tensor.new_zeros, torch.Tensor.size, torch.Tensor.is_floating_point, torch.Tensor.numel, torch.Tensor.stride, torch.Tensor.requires_grad_, torch.autograd.grad, torch.add, } if fast_mode: expected_used_calls.add(torch.Tensor.is_complex) self.assertEqual(expected_used_calls, total_used_calls) run_test(fast_mode=True) run_test(fast_mode=False)
import torch import numpy as np import inspect import functools import pprint import pickle import collections import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_CROSSREF from torch.overrides import ( handle_torch_function, has_torch_function, get_overridable_functions, get_testing_overrides, is_tensor_method_or_property, TorchFunctionMode, _get_current_function_mode, _get_current_function_mode_stack, ) from torch.utils._mode_utils import all_same_mode from torch.utils._pytree import tree_map Tensor = torch.Tensor HANDLED_FUNCTIONS_DIAGONAL = {} HANDLED_FUNCTIONS_SUB = {} HANDLED_FUNCTIONS_SUB_DIAGONAL = {} HANDLED_FUNCTIONS_TENSOR_LIKE = {} WRAPPED_TRIGGERED_IMPLS = {} from torch.testing._internal.generated.annotated_fn_args import annotated_args class TestGradCheckOverride(TestCase): from torch.testing._internal.common_utils import gradcheck, gradgradcheck from torch.distributions.utils import broadcast_all
import torch import numpy as np import inspect import functools import pprint import pickle import collections import unittest import contextlib from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_CROSSREF, TEST_WITH_TORCHDYNAMO from torch.overrides import ( handle_torch_function, has_torch_function, get_ignored_functions, get_overridable_functions, get_testing_overrides, resolve_name, is_tensor_method_or_property, TorchFunctionMode, _get_current_function_mode, _get_current_function_mode_stack, BaseTorchFunctionMode ) from torch.utils._mode_utils import all_same_mode from torch.utils._pytree import tree_map Tensor = torch.Tensor HANDLED_FUNCTIONS_DIAGONAL = {} HANDLED_FUNCTIONS_SUB = {} HANDLED_FUNCTIONS_SUB_DIAGONAL = {} HANDLED_FUNCTIONS_TENSOR_LIKE = {} WRAPPED_TRIGGERED_IMPLS = {} from torch.testing._internal.generated.annotated_fn_args import annotated_args class TestGradCheckOverride(TestCase): from torch.testing._internal.common_utils import gradcheck, gradgradcheck from torch.distributions.utils import broadcast_all from torch._C import _is_torch_function_all_disabled from torch._C import _len_torch_function_stack from torch.utils._device import DeviceContext
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_overrides.py
run_test
def run_test(fast_mode): a = wrap(torch.tensor(5.0, dtype=torch.double)) b = wrap(torch.tensor(6.0, dtype=torch.double)) a.requires_grad = True b.requires_grad = True gradcheck(torch.add, (a, b), raise_exception=False, check_batched_grad=False, fast_mode=fast_mode) gradgradcheck(torch.add, (a, b), raise_exception=False, check_batched_grad=False, fast_mode=fast_mode) total_used_attrs = a.used_attrs.union(b.used_attrs) total_used_calls = a.used_calls.union(b.used_calls) # These attributes (and the functions below) may change # if the gradcheck implementation changes. It's best to # aim for attributes that may be commonly present on other # Tensor-likes. expected_used_attrs = { 'data', 'dtype', 'is_floating_point', 'is_sparse', 'layout', 'new_zeros', 'numel', 'requires_grad', 'requires_grad_', 'retain_grad', 'size', 'stride', } if fast_mode: expected_used_attrs.add('is_complex') expected_used_attrs.add('device') self.assertEqual(expected_used_attrs, total_used_attrs) expected_used_calls = { torch.Tensor.new_zeros, torch.Tensor.size, torch.Tensor.is_floating_point, torch.Tensor.numel, torch.Tensor.retain_grad, torch.Tensor.stride, torch.Tensor.requires_grad_, torch.autograd.grad, torch.add, } if fast_mode: expected_used_calls.add(torch.Tensor.is_complex) self.assertEqual(expected_used_calls, total_used_calls) run_test(fast_mode=True) run_test(fast_mode=False)
def run_test(fast_mode): a = wrap(torch.tensor(5.0, dtype=torch.double)) b = wrap(torch.tensor(6.0, dtype=torch.double)) a.requires_grad = True b.requires_grad = True gradcheck(torch.add, (a, b), raise_exception=False, check_batched_grad=False, fast_mode=fast_mode) gradgradcheck(torch.add, (a, b), raise_exception=False, check_batched_grad=False, fast_mode=fast_mode) total_used_attrs = a.used_attrs.union(b.used_attrs) total_used_calls = a.used_calls.union(b.used_calls) # These attributes (and the functions below) may change # if the gradcheck implementation changes. It's best to # aim for attributes that may be commonly present on other # Tensor-likes. expected_used_attrs = { 'data', 'dtype', 'is_floating_point', 'is_sparse', 'layout', 'new_zeros', 'numel', 'requires_grad', 'requires_grad_', 'size', 'stride', } if fast_mode: expected_used_attrs.add('is_complex') expected_used_attrs.add('device') self.assertEqual(expected_used_attrs, total_used_attrs) expected_used_calls = { torch.Tensor.new_zeros, torch.Tensor.size, torch.Tensor.is_floating_point, torch.Tensor.numel, torch.Tensor.stride, torch.Tensor.requires_grad_, torch.autograd.grad, torch.add, } if fast_mode: expected_used_calls.add(torch.Tensor.is_complex) self.assertEqual(expected_used_calls, total_used_calls) run_test(fast_mode=True) run_test(fast_mode=False)
import torch import numpy as np import inspect import functools import pprint import pickle import collections import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_CROSSREF from torch.overrides import ( handle_torch_function, has_torch_function, get_overridable_functions, get_testing_overrides, is_tensor_method_or_property, TorchFunctionMode, _get_current_function_mode, _get_current_function_mode_stack, ) from torch.utils._mode_utils import all_same_mode from torch.utils._pytree import tree_map Tensor = torch.Tensor HANDLED_FUNCTIONS_DIAGONAL = {} HANDLED_FUNCTIONS_SUB = {} HANDLED_FUNCTIONS_SUB_DIAGONAL = {} HANDLED_FUNCTIONS_TENSOR_LIKE = {} WRAPPED_TRIGGERED_IMPLS = {} from torch.testing._internal.generated.annotated_fn_args import annotated_args from torch.testing._internal.common_utils import gradcheck, gradgradcheck from torch.distributions.utils import broadcast_all
import torch import numpy as np import inspect import functools import pprint import pickle import collections import unittest import contextlib from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_CROSSREF, TEST_WITH_TORCHDYNAMO from torch.overrides import ( handle_torch_function, has_torch_function, get_ignored_functions, get_overridable_functions, get_testing_overrides, resolve_name, is_tensor_method_or_property, TorchFunctionMode, _get_current_function_mode, _get_current_function_mode_stack, BaseTorchFunctionMode ) from torch.utils._mode_utils import all_same_mode from torch.utils._pytree import tree_map Tensor = torch.Tensor HANDLED_FUNCTIONS_DIAGONAL = {} HANDLED_FUNCTIONS_SUB = {} HANDLED_FUNCTIONS_SUB_DIAGONAL = {} HANDLED_FUNCTIONS_TENSOR_LIKE = {} WRAPPED_TRIGGERED_IMPLS = {} from torch.testing._internal.generated.annotated_fn_args import annotated_args from torch.testing._internal.common_utils import gradcheck, gradgradcheck from torch.distributions.utils import broadcast_all from torch._C import _is_torch_function_all_disabled from torch._C import _len_torch_function_stack from torch.utils._device import DeviceContext
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_overrides.py
generate_tensor_like_torch_implementations
def generate_tensor_like_torch_implementations(): torch_vars = vars(torch) untested_funcs = [] testing_overrides = get_testing_overrides() # test/test_cpp_api_parity.py monkeypatches torch.nn to have a new # function sample_functional. Depending on what order you run pytest # collection, this may trigger the error here. This is a hack to fix # the problem. A more proper fix is to make the "not tested" check # a test on its own, and to make sure the monkeypatch is only installed # for the span of the relevant test (and deleted afterwards) testing_ignore = {"sample_functional"} for namespace, funcs in get_overridable_functions().items(): for func in funcs: if func not in testing_overrides and func.__name__ not in testing_ignore: untested_funcs.append("{}.{}".format(namespace, func.__name__)) msg = ( "The following functions are not tested for __torch_function__ " "support, please ensure there is an entry in the dict returned by " "torch.overrides.get_testing_overrides for this function or if a " "__torch_function__ override does not make sense, add an entry to " "the tuple returned by torch._overrides.get_ignored_functions.\n\n{}" ) assert len(untested_funcs) == 0, msg.format(pprint.pformat(untested_funcs)) for func, override in testing_overrides.items(): # decorate the overrides with implements_tensor_like if it's not a # torch.Tensor method wrapped = triggered_wrapper(override) # See note: "_triggered wrapper" WRAPPED_TRIGGERED_IMPLS[func] = wrapped if is_tensor_method_or_property(func): implements_sub(func)(wrapped) else: implements_tensor_like(func)(wrapped) generate_tensor_like_torch_implementations() class TensorLike: """A class that overrides the full torch API This class is used to explicitly test that the full torch.tensor API can be overriden with a class that defines __torch_function__. """ @classmethod def __torch_function__(cls, func, types, args=(), kwargs=None): if(kwargs is None): kwargs = {} if func not in HANDLED_FUNCTIONS_TENSOR_LIKE: return NotImplemented # In this case _torch_function_ should override TensorLike objects return HANDLED_FUNCTIONS_TENSOR_LIKE[func](*args, **kwargs) class TestTorchFunctionOverride(TestCase): def test_mean_semantics(self): """Test that a function with one argument can be overrided""" t1 = DiagonalTensor(5, 2) t2 = SubTensor([[1, 2], [1, 2]]) t3 = SubDiagonalTensor(5, 2) self.assertEqual(torch.mean(t1), 0.4) self.assertEqual(bar(t1), -1) self.assertEqual(torch.mean(t2), 0) self.assertEqual(bar(t2), 1) self.assertEqual(torch.mean(t3), 4.0) self.assertEqual(bar(t3), 0) def test_has_torch_function_non_sequence(self): with self.assertRaisesRegex(TypeError, "expected a sequence"): has_torch_function(object()) def test_mm_semantics(self): """Test that a function with multiple arguments can be overrided""" t1 = DiagonalTensor(5, 2) t2 = torch.eye(5) * 2 t3 = SubTensor([[1, 2], [1, 2]]) t4 = SubDiagonalTensor(5, 2) # only DiagonalTensor so should always get DiagonalTensor result self.assertEqual(torch.mm(t1, t1), 0) # tensor and DiagonalTensor, always return DiagonalTensor result self.assertEqual(torch.mm(t1, t2), 0) self.assertEqual(torch.mm(t2, t1), 0) # only SubTensor so should always get SubTensor result self.assertEqual(torch.mm(t3, t3), -1) # tensor and SubTensor so should always get SubTensor result self.assertEqual(torch.mm(t3, t2), -1) self.assertEqual(torch.mm(t2, t3), -1) # DiagonalTensor and SubTensor are unrelated classes so the result # depends on which argument appears first self.assertEqual(torch.mm(t3, t1), -1) self.assertEqual(torch.mm(t1, t3), 0) # SubDiagonalTensor should take precedence over DiagonalTensor # but should behave otherwise the same as DiagonalTensor self.assertEqual(torch.mm(t4, t4), 1) self.assertEqual(torch.mm(t4, t1), 1) self.assertEqual(torch.mm(t1, t4), 1) self.assertEqual(torch.mm(t4, t2), 1) self.assertEqual(torch.mm(t2, t4), 1) self.assertEqual(torch.mm(t3, t4), -1) self.assertEqual(torch.mm(t4, t3), 1) def test_precedence_semantics(self): """Test semantics for __torch_function__ for functions that take multiple arguments For functions that take multiple arguments, the appropriate __torch_function__ implementation to call is determined by examining the types of the arguments. The precedence order is left-to-right in the argument list, except subclasses are always checked before superclasses. The first result of calling the implementations in precedence order that is not NotImplemented is returned to the user. If all implementations return NotImplemented, a TypeError is raised. All cases are tested with functions implemented in C++ and either foo or baz, which are python functions defined above that are instrumented to obey the same dispatch rules as the functions in torch.functional. """ # DiagonalTensor has a valid override and SubDiagonal has an # override that returns NotImplemented so we should call the # DiagonalTensor implementation, returning -1 t1 = DiagonalTensor(5, 2) t2 = SubDiagonalTensor(5, 2) self.assertEqual(torch.div(t1, t2), -1) self.assertEqual(torch.div(t2, t1), -1) self.assertEqual(foo(t1, t2), -1) self.assertEqual(foo(t2, t1), -1) # SubTensor has an implementation that returns NotImplemented as # well so it should behave exactly like SubDiagonalTensor in the # test above t3 = SubTensor([[1, 2], [1, 2]]) self.assertEqual(torch.div(t1, t3), -1) self.assertEqual(torch.div(t3, t1), -1) self.assertEqual(foo(t1, t3), -1) self.assertEqual(foo(t3, t1), -1) # div between SubTensor and SubDiagonalTensor should raise # TypeError since both have an implementation that # explicitly returns NotImplemented with self.assertRaises(TypeError): torch.div(t2, t3) with self.assertRaises(TypeError): torch.div(t3, t2) with self.assertRaises(TypeError): foo(t2, t3) with self.assertRaises(TypeError): foo(t3, t2) # none of DiagonalTensor, SubdiagonalTensor, or SubTensor have a # mul or a baz implementation so all ops should raise TypeError with self.assertRaises(TypeError): torch.mul(t1, t1) with self.assertRaises(TypeError): torch.mul(t1, t2) with self.assertRaises(TypeError): torch.mul(t1, t3) with self.assertRaises(TypeError): torch.mul(t2, t1) with self.assertRaises(TypeError): torch.mul(t2, t2) with self.assertRaises(TypeError): torch.mul(t2, t3) with self.assertRaises(TypeError): torch.mul(t3, t1) with self.assertRaises(TypeError): torch.mul(t3, t2) with self.assertRaises(TypeError): torch.mul(t3, t3) with self.assertRaises(TypeError): baz(t1, t1) with self.assertRaises(TypeError): baz(t1, t2) with self.assertRaises(TypeError): baz(t1, t3) with self.assertRaises(TypeError): baz(t2, t1) with self.assertRaises(TypeError): baz(t2, t2) with self.assertRaises(TypeError): baz(t2, t3) with self.assertRaises(TypeError): baz(t3, t1) with self.assertRaises(TypeError): baz(t3, t2) with self.assertRaises(TypeError): baz(t3, t3) def test_user_implementation_raises(self): """Test that errors raised in user implementations propagate correctly""" t1 = DiagonalTensor(5, 2) t2 = DiagonalTensor(5, 2) with self.assertRaises(ValueError): torch.add(t1, t2) with self.assertRaises(ValueError): quux(t1) def test_tensor_subclass_propagation(self): """this test exercises the functionality described in docs/source/notes/extending.rst#subclassing-torchtensor""" t1 = torch.tensor([5]) t2 = torch.tensor([6]) s1 = SubTensor2([5]) s2 = SubTensor2([6]) ss1 = SubSubTensor2([5]) ss2 = SubSubTensor2([6]) sn1 = SubTensor3([5]) sn2 = SubTensor3([6]) # Check that leaf subclass is kept regardless of order self.assertTrue(isinstance(s1 + t2, SubTensor2)) self.assertTrue(isinstance(t1 + s2, SubTensor2)) self.assertTrue(isinstance(s1 + s2, SubTensor2)) # Check indexing subclass is kept self.assertTrue(isinstance(s1[0], SubTensor2)) # Check case for subclass of subclass. self.assertTrue(isinstance(ss1 + ss2, SubSubTensor2)) self.assertTrue(isinstance(ss1 + s2, SubSubTensor2)) self.assertTrue(isinstance(s1 + ss2, SubSubTensor2)) self.assertTrue(isinstance(ss1 + ss2, SubSubTensor2)) self.assertTrue(isinstance(ss1 + t2, SubSubTensor2)) self.assertTrue(isinstance(t1 + ss2, SubSubTensor2)) self.assertTrue(isinstance(ss1[0], SubSubTensor2)) # Make sure unrelated class trees are not merged. with self.assertRaises(TypeError): s1 + sn2 with self.assertRaises(TypeError): sn1 + s2 def test_base(self): # https://github.com/szagoruyko/pytorchviz/issues/65 class DummyTensor(torch.Tensor): pass a = torch.ones(1) c = DummyTensor(a) self.assertTrue(c._is_view()) self.assertTrue(c._base is a) def test_grad(self): # Previously, Tensor-like objects that did not subclass from Tensor # did not get wrapped into unary tuples before being passed into # handle_torch_function, in contradiction with how Tensor-likes # were handled # # NB: this asserts that the arguments get normalized into a tuple # before entering the torch function handler; it could go the # other way but beware https://github.com/pytorch/pytorch/issues/76037 class Dummy: @classmethod def __torch_function__(cls, func, types, args=(), kwargs=None): inputs, outputs = args self.assertEqual(inputs, (x,)) self.assertEqual(outputs, (x,)) return -1 x = Dummy() self.assertEqual(torch.autograd.grad(x, x), -1) def test_pow_rpow(self): class NothingImplemented(torch.Tensor): @classmethod def __torch_function__(cls, func, types, args=(), kwargs=None): return NotImplemented class RPowOnly(torch.Tensor): @classmethod def __torch_function__(cls, func, types, args=(), kwargs=None): if func is torch.Tensor.__rpow__: return -1 return NotImplemented self.assertEqual(NothingImplemented() ** RPowOnly(), -1)
def generate_tensor_like_torch_implementations(): torch_vars = vars(torch) untested_funcs = [] testing_overrides = get_testing_overrides() # test/test_cpp_api_parity.py monkeypatches torch.nn to have a new # function sample_functional. Depending on what order you run pytest # collection, this may trigger the error here. This is a hack to fix # the problem. A more proper fix is to make the "not tested" check # a test on its own, and to make sure the monkeypatch is only installed # for the span of the relevant test (and deleted afterwards) testing_ignore = {"sample_functional", "autocast"} for namespace, funcs in get_overridable_functions().items(): for func in funcs: if func not in testing_overrides and func.__name__ not in testing_ignore: untested_funcs.append(f"{namespace}.{func.__name__}") msg = ( "The following functions are not tested for __torch_function__ " "support, please ensure there is an entry in the dict returned by " "torch.overrides.get_testing_overrides for this function or if a " "__torch_function__ override does not make sense, add an entry to " "the tuple returned by torch._overrides.get_ignored_functions.\n\n{}" ) assert len(untested_funcs) == 0, msg.format(pprint.pformat(untested_funcs)) for func, override in testing_overrides.items(): # decorate the overrides with implements_tensor_like if it's not a # torch.Tensor method wrapped = triggered_wrapper(override) # See note: "_triggered wrapper" WRAPPED_TRIGGERED_IMPLS[func] = wrapped if is_tensor_method_or_property(func): implements_sub(func)(wrapped) else: implements_tensor_like(func)(wrapped) generate_tensor_like_torch_implementations() class TensorLike: """A class that overrides the full torch API This class is used to explicitly test that the full torch.tensor API can be overriden with a class that defines __torch_function__. """ @classmethod def __torch_function__(cls, func, types, args=(), kwargs=None): if kwargs is None: kwargs = {} if func not in HANDLED_FUNCTIONS_TENSOR_LIKE: return NotImplemented # In this case _torch_function_ should override TensorLike objects return HANDLED_FUNCTIONS_TENSOR_LIKE[func](*args, **kwargs) class TestTorchFunctionOverride(TestCase): @classmethod def setUpClass(cls): cls._stack = contextlib.ExitStack() if TEST_WITH_TORCHDYNAMO: # Add classes to the wrapped tensor subclasses @contextlib.contextmanager def setup_subclasses(): old = set(torch._dynamo.config.traceable_tensor_subclasses) torch._dynamo.config.traceable_tensor_subclasses.add(DiagonalTensor) try: yield finally: torch._dynamo.config.traceable_tensor_subclasses.clear() torch._dynamo.config.traceable_tensor_subclasses.update(old) cls._stack.enter_context(setup_subclasses()) @classmethod def tearDownClass(cls): cls._stack.close() def test_mean_semantics(self): """Test that a function with one argument can be overridden""" t1 = DiagonalTensor(5, 2) t2 = SubTensor([[1, 2], [1, 2]]) t3 = SubDiagonalTensor(5, 2) self.assertEqual(torch.mean(t1), 0.4) self.assertEqual(bar(t1), -1) self.assertEqual(torch.mean(t2), 0) self.assertEqual(bar(t2), 1) self.assertEqual(torch.mean(t3), 4.0) self.assertEqual(bar(t3), 0) def test_has_torch_function_non_sequence(self): with self.assertRaisesRegex(TypeError, "expected a sequence"): has_torch_function(object()) def test_mm_semantics(self): """Test that a function with multiple arguments can be overridden""" t1 = DiagonalTensor(5, 2) t2 = torch.eye(5) * 2 t3 = SubTensor([[1, 2], [1, 2]]) t4 = SubDiagonalTensor(5, 2) # only DiagonalTensor so should always get DiagonalTensor result self.assertEqual(torch.mm(t1, t1), 0) # tensor and DiagonalTensor, always return DiagonalTensor result self.assertEqual(torch.mm(t1, t2), 0) self.assertEqual(torch.mm(t2, t1), 0) # only SubTensor so should always get SubTensor result self.assertEqual(torch.mm(t3, t3), -1) # tensor and SubTensor so should always get SubTensor result self.assertEqual(torch.mm(t3, t2), -1) self.assertEqual(torch.mm(t2, t3), -1) # DiagonalTensor and SubTensor are unrelated classes so the result # depends on which argument appears first self.assertEqual(torch.mm(t3, t1), -1) self.assertEqual(torch.mm(t1, t3), 0) # SubDiagonalTensor should take precedence over DiagonalTensor # but should behave otherwise the same as DiagonalTensor self.assertEqual(torch.mm(t4, t4), 1) self.assertEqual(torch.mm(t4, t1), 1) self.assertEqual(torch.mm(t1, t4), 1) self.assertEqual(torch.mm(t4, t2), 1) self.assertEqual(torch.mm(t2, t4), 1) self.assertEqual(torch.mm(t3, t4), -1) self.assertEqual(torch.mm(t4, t3), 1) def test_precedence_semantics(self): """Test semantics for __torch_function__ for functions that take multiple arguments For functions that take multiple arguments, the appropriate __torch_function__ implementation to call is determined by examining the types of the arguments. The precedence order is left-to-right in the argument list, except subclasses are always checked before superclasses. The first result of calling the implementations in precedence order that is not NotImplemented is returned to the user. If all implementations return NotImplemented, a TypeError is raised. All cases are tested with functions implemented in C++ and either foo or baz, which are python functions defined above that are instrumented to obey the same dispatch rules as the functions in torch.functional. """ # DiagonalTensor has a valid override and SubDiagonal has an # override that returns NotImplemented so we should call the # DiagonalTensor implementation, returning -1 t1 = DiagonalTensor(5, 2) t2 = SubDiagonalTensor(5, 2) self.assertEqual(torch.div(t1, t2), -1) self.assertEqual(torch.div(t2, t1), -1) self.assertEqual(foo(t1, t2), -1) self.assertEqual(foo(t2, t1), -1) # SubTensor has an implementation that returns NotImplemented as # well so it should behave exactly like SubDiagonalTensor in the # test above t3 = SubTensor([[1, 2], [1, 2]]) self.assertEqual(torch.div(t1, t3), -1) self.assertEqual(torch.div(t3, t1), -1) self.assertEqual(foo(t1, t3), -1) self.assertEqual(foo(t3, t1), -1) # div between SubTensor and SubDiagonalTensor should raise # TypeError since both have an implementation that # explicitly returns NotImplemented with self.assertRaises(TypeError): torch.div(t2, t3) with self.assertRaises(TypeError): torch.div(t3, t2) with self.assertRaises(TypeError): foo(t2, t3) with self.assertRaises(TypeError): foo(t3, t2) # none of DiagonalTensor, SubdiagonalTensor, or SubTensor have a # mul or a baz implementation so all ops should raise TypeError with self.assertRaises(TypeError): torch.mul(t1, t1) with self.assertRaises(TypeError): torch.mul(t1, t2) with self.assertRaises(TypeError): torch.mul(t1, t3) with self.assertRaises(TypeError): torch.mul(t2, t1) with self.assertRaises(TypeError): torch.mul(t2, t2) with self.assertRaises(TypeError): torch.mul(t2, t3) with self.assertRaises(TypeError): torch.mul(t3, t1) with self.assertRaises(TypeError): torch.mul(t3, t2) with self.assertRaises(TypeError): torch.mul(t3, t3) with self.assertRaises(TypeError): baz(t1, t1) with self.assertRaises(TypeError): baz(t1, t2) with self.assertRaises(TypeError): baz(t1, t3) with self.assertRaises(TypeError): baz(t2, t1) with self.assertRaises(TypeError): baz(t2, t2) with self.assertRaises(TypeError): baz(t2, t3) with self.assertRaises(TypeError): baz(t3, t1) with self.assertRaises(TypeError): baz(t3, t2) with self.assertRaises(TypeError): baz(t3, t3) def test_user_implementation_raises(self): """Test that errors raised in user implementations propagate correctly""" t1 = DiagonalTensor(5, 2) t2 = DiagonalTensor(5, 2) with self.assertRaises(ValueError): torch.add(t1, t2) with self.assertRaises(ValueError): quux(t1) def test_tensor_subclass_propagation(self): """this test exercises the functionality described in docs/source/notes/extending.rst#subclassing-torchtensor""" t1 = torch.tensor([5]) t2 = torch.tensor([6]) s1 = SubTensor2([5]) s2 = SubTensor2([6]) ss1 = SubSubTensor2([5]) ss2 = SubSubTensor2([6]) sn1 = SubTensor3([5]) sn2 = SubTensor3([6]) # Check that leaf subclass is kept regardless of order self.assertTrue(isinstance(s1 + t2, SubTensor2)) self.assertTrue(isinstance(t1 + s2, SubTensor2)) self.assertTrue(isinstance(s1 + s2, SubTensor2)) # Check indexing subclass is kept self.assertTrue(isinstance(s1[0], SubTensor2)) # Check case for subclass of subclass. self.assertTrue(isinstance(ss1 + ss2, SubSubTensor2)) self.assertTrue(isinstance(ss1 + s2, SubSubTensor2)) self.assertTrue(isinstance(s1 + ss2, SubSubTensor2)) self.assertTrue(isinstance(ss1 + ss2, SubSubTensor2)) self.assertTrue(isinstance(ss1 + t2, SubSubTensor2)) self.assertTrue(isinstance(t1 + ss2, SubSubTensor2)) self.assertTrue(isinstance(ss1[0], SubSubTensor2)) # Make sure unrelated class trees are not merged. with self.assertRaises(TypeError): s1 + sn2 with self.assertRaises(TypeError): sn1 + s2 def test_base(self): # https://github.com/szagoruyko/pytorchviz/issues/65 class DummyTensor(torch.Tensor): pass a = torch.ones(1) c = DummyTensor(a) self.assertTrue(c._is_view()) self.assertTrue(c._base is a) def test_grad(self): # Previously, Tensor-like objects that did not subclass from Tensor # did not get wrapped into unary tuples before being passed into # handle_torch_function, in contradiction with how Tensor-likes # were handled # # NB: this asserts that the arguments get normalized into a tuple # before entering the torch function handler; it could go the # other way but beware https://github.com/pytorch/pytorch/issues/76037 class Dummy: @classmethod def __torch_function__(cls, func, types, args=(), kwargs=None): inputs, outputs = args self.assertEqual(inputs, (x,)) self.assertEqual(outputs, (x,)) return -1 x = Dummy() self.assertEqual(torch.autograd.grad(x, x), -1) def test_pow_rpow(self): class NothingImplemented(torch.Tensor): @classmethod def __torch_function__(cls, func, types, args=(), kwargs=None): return NotImplemented class RPowOnly(torch.Tensor): @classmethod def __torch_function__(cls, func, types, args=(), kwargs=None): if func is torch.Tensor.__rpow__: return -1 return NotImplemented self.assertEqual(NothingImplemented() ** RPowOnly(), -1)
import torch import numpy as np import inspect import functools import pprint import pickle import collections import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_CROSSREF from torch.overrides import ( handle_torch_function, has_torch_function, get_overridable_functions, get_testing_overrides, is_tensor_method_or_property, TorchFunctionMode, _get_current_function_mode, _get_current_function_mode_stack, ) from torch.utils._mode_utils import all_same_mode from torch.utils._pytree import tree_map Tensor = torch.Tensor HANDLED_FUNCTIONS_DIAGONAL = {} HANDLED_FUNCTIONS_SUB = {} HANDLED_FUNCTIONS_SUB_DIAGONAL = {} HANDLED_FUNCTIONS_TENSOR_LIKE = {} WRAPPED_TRIGGERED_IMPLS = {} from torch.testing._internal.generated.annotated_fn_args import annotated_args from torch.testing._internal.common_utils import gradcheck, gradgradcheck from torch.distributions.utils import broadcast_all
import torch import numpy as np import inspect import functools import pprint import pickle import collections import unittest import contextlib from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_CROSSREF, TEST_WITH_TORCHDYNAMO from torch.overrides import ( handle_torch_function, has_torch_function, get_ignored_functions, get_overridable_functions, get_testing_overrides, resolve_name, is_tensor_method_or_property, TorchFunctionMode, _get_current_function_mode, _get_current_function_mode_stack, BaseTorchFunctionMode ) from torch.utils._mode_utils import all_same_mode from torch.utils._pytree import tree_map Tensor = torch.Tensor HANDLED_FUNCTIONS_DIAGONAL = {} HANDLED_FUNCTIONS_SUB = {} HANDLED_FUNCTIONS_SUB_DIAGONAL = {} HANDLED_FUNCTIONS_TENSOR_LIKE = {} WRAPPED_TRIGGERED_IMPLS = {} from torch.testing._internal.generated.annotated_fn_args import annotated_args from torch.testing._internal.common_utils import gradcheck, gradgradcheck from torch.distributions.utils import broadcast_all from torch._C import _is_torch_function_all_disabled from torch._C import _len_torch_function_stack from torch.utils._device import DeviceContext
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_overrides.py
setup_subclasses
def setup_subclasses(): old = set(torch._dynamo.config.traceable_tensor_subclasses) torch._dynamo.config.traceable_tensor_subclasses.add(DiagonalTensor) try: yield finally: torch._dynamo.config.traceable_tensor_subclasses.clear() torch._dynamo.config.traceable_tensor_subclasses.update(old) cls._stack.enter_context(setup_subclasses())
import torch import numpy as np import inspect import functools import pprint import pickle import collections import unittest import contextlib from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_CROSSREF, TEST_WITH_TORCHDYNAMO from torch.overrides import ( handle_torch_function, has_torch_function, get_ignored_functions, get_overridable_functions, get_testing_overrides, resolve_name, is_tensor_method_or_property, TorchFunctionMode, _get_current_function_mode, _get_current_function_mode_stack, BaseTorchFunctionMode ) from torch.utils._mode_utils import all_same_mode from torch.utils._pytree import tree_map Tensor = torch.Tensor HANDLED_FUNCTIONS_DIAGONAL = {} HANDLED_FUNCTIONS_SUB = {} HANDLED_FUNCTIONS_SUB_DIAGONAL = {} HANDLED_FUNCTIONS_TENSOR_LIKE = {} WRAPPED_TRIGGERED_IMPLS = {} from torch.testing._internal.generated.annotated_fn_args import annotated_args from torch.testing._internal.common_utils import gradcheck, gradgradcheck from torch.distributions.utils import broadcast_all from torch._C import _is_torch_function_all_disabled from torch._C import _len_torch_function_stack from torch.utils._device import DeviceContext
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_overrides.py
tearDownClass
def tearDownClass(cls): cls._stack.close()
import torch import numpy as np import inspect import functools import pprint import pickle import collections import unittest import contextlib from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_CROSSREF, TEST_WITH_TORCHDYNAMO from torch.overrides import ( handle_torch_function, has_torch_function, get_ignored_functions, get_overridable_functions, get_testing_overrides, resolve_name, is_tensor_method_or_property, TorchFunctionMode, _get_current_function_mode, _get_current_function_mode_stack, BaseTorchFunctionMode ) from torch.utils._mode_utils import all_same_mode from torch.utils._pytree import tree_map Tensor = torch.Tensor HANDLED_FUNCTIONS_DIAGONAL = {} HANDLED_FUNCTIONS_SUB = {} HANDLED_FUNCTIONS_SUB_DIAGONAL = {} HANDLED_FUNCTIONS_TENSOR_LIKE = {} WRAPPED_TRIGGERED_IMPLS = {} class TestTorchFunctionOverride(TestCase): from torch.testing._internal.generated.annotated_fn_args import annotated_args from torch.testing._internal.common_utils import gradcheck, gradgradcheck from torch.distributions.utils import broadcast_all from torch._C import _is_torch_function_all_disabled from torch._C import _len_torch_function_stack from torch.utils._device import DeviceContext
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_overrides.py
test_mean_semantics
def test_mean_semantics(self): """Test that a function with one argument can be overrided""" t1 = DiagonalTensor(5, 2) t2 = SubTensor([[1, 2], [1, 2]]) t3 = SubDiagonalTensor(5, 2) self.assertEqual(torch.mean(t1), 0.4) self.assertEqual(bar(t1), -1) self.assertEqual(torch.mean(t2), 0) self.assertEqual(bar(t2), 1) self.assertEqual(torch.mean(t3), 4.0) self.assertEqual(bar(t3), 0)
def test_mean_semantics(self): """Test that a function with one argument can be overridden""" t1 = DiagonalTensor(5, 2) t2 = SubTensor([[1, 2], [1, 2]]) t3 = SubDiagonalTensor(5, 2) self.assertEqual(torch.mean(t1), 0.4) self.assertEqual(bar(t1), -1) self.assertEqual(torch.mean(t2), 0) self.assertEqual(bar(t2), 1) self.assertEqual(torch.mean(t3), 4.0) self.assertEqual(bar(t3), 0)
import torch import numpy as np import inspect import functools import pprint import pickle import collections import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_CROSSREF from torch.overrides import ( handle_torch_function, has_torch_function, get_overridable_functions, get_testing_overrides, is_tensor_method_or_property, TorchFunctionMode, _get_current_function_mode, _get_current_function_mode_stack, ) from torch.utils._mode_utils import all_same_mode from torch.utils._pytree import tree_map Tensor = torch.Tensor HANDLED_FUNCTIONS_DIAGONAL = {} HANDLED_FUNCTIONS_SUB = {} HANDLED_FUNCTIONS_SUB_DIAGONAL = {} HANDLED_FUNCTIONS_TENSOR_LIKE = {} WRAPPED_TRIGGERED_IMPLS = {} class TestTorchFunctionOverride(TestCase): from torch.testing._internal.generated.annotated_fn_args import annotated_args from torch.testing._internal.common_utils import gradcheck, gradgradcheck from torch.distributions.utils import broadcast_all
import torch import numpy as np import inspect import functools import pprint import pickle import collections import unittest import contextlib from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_CROSSREF, TEST_WITH_TORCHDYNAMO from torch.overrides import ( handle_torch_function, has_torch_function, get_ignored_functions, get_overridable_functions, get_testing_overrides, resolve_name, is_tensor_method_or_property, TorchFunctionMode, _get_current_function_mode, _get_current_function_mode_stack, BaseTorchFunctionMode ) from torch.utils._mode_utils import all_same_mode from torch.utils._pytree import tree_map Tensor = torch.Tensor HANDLED_FUNCTIONS_DIAGONAL = {} HANDLED_FUNCTIONS_SUB = {} HANDLED_FUNCTIONS_SUB_DIAGONAL = {} HANDLED_FUNCTIONS_TENSOR_LIKE = {} WRAPPED_TRIGGERED_IMPLS = {} class TestTorchFunctionOverride(TestCase): from torch.testing._internal.generated.annotated_fn_args import annotated_args from torch.testing._internal.common_utils import gradcheck, gradgradcheck from torch.distributions.utils import broadcast_all from torch._C import _is_torch_function_all_disabled from torch._C import _len_torch_function_stack from torch.utils._device import DeviceContext
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_overrides.py
test_mm_semantics
def test_mm_semantics(self): """Test that a function with multiple arguments can be overrided""" t1 = DiagonalTensor(5, 2) t2 = torch.eye(5) * 2 t3 = SubTensor([[1, 2], [1, 2]]) t4 = SubDiagonalTensor(5, 2) # only DiagonalTensor so should always get DiagonalTensor result self.assertEqual(torch.mm(t1, t1), 0) # tensor and DiagonalTensor, always return DiagonalTensor result self.assertEqual(torch.mm(t1, t2), 0) self.assertEqual(torch.mm(t2, t1), 0) # only SubTensor so should always get SubTensor result self.assertEqual(torch.mm(t3, t3), -1) # tensor and SubTensor so should always get SubTensor result self.assertEqual(torch.mm(t3, t2), -1) self.assertEqual(torch.mm(t2, t3), -1) # DiagonalTensor and SubTensor are unrelated classes so the result # depends on which argument appears first self.assertEqual(torch.mm(t3, t1), -1) self.assertEqual(torch.mm(t1, t3), 0) # SubDiagonalTensor should take precedence over DiagonalTensor # but should behave otherwise the same as DiagonalTensor self.assertEqual(torch.mm(t4, t4), 1) self.assertEqual(torch.mm(t4, t1), 1) self.assertEqual(torch.mm(t1, t4), 1) self.assertEqual(torch.mm(t4, t2), 1) self.assertEqual(torch.mm(t2, t4), 1) self.assertEqual(torch.mm(t3, t4), -1) self.assertEqual(torch.mm(t4, t3), 1)
def test_mm_semantics(self): """Test that a function with multiple arguments can be overridden""" t1 = DiagonalTensor(5, 2) t2 = torch.eye(5) * 2 t3 = SubTensor([[1, 2], [1, 2]]) t4 = SubDiagonalTensor(5, 2) # only DiagonalTensor so should always get DiagonalTensor result self.assertEqual(torch.mm(t1, t1), 0) # tensor and DiagonalTensor, always return DiagonalTensor result self.assertEqual(torch.mm(t1, t2), 0) self.assertEqual(torch.mm(t2, t1), 0) # only SubTensor so should always get SubTensor result self.assertEqual(torch.mm(t3, t3), -1) # tensor and SubTensor so should always get SubTensor result self.assertEqual(torch.mm(t3, t2), -1) self.assertEqual(torch.mm(t2, t3), -1) # DiagonalTensor and SubTensor are unrelated classes so the result # depends on which argument appears first self.assertEqual(torch.mm(t3, t1), -1) self.assertEqual(torch.mm(t1, t3), 0) # SubDiagonalTensor should take precedence over DiagonalTensor # but should behave otherwise the same as DiagonalTensor self.assertEqual(torch.mm(t4, t4), 1) self.assertEqual(torch.mm(t4, t1), 1) self.assertEqual(torch.mm(t1, t4), 1) self.assertEqual(torch.mm(t4, t2), 1) self.assertEqual(torch.mm(t2, t4), 1) self.assertEqual(torch.mm(t3, t4), -1) self.assertEqual(torch.mm(t4, t3), 1)
import torch import numpy as np import inspect import functools import pprint import pickle import collections import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_CROSSREF from torch.overrides import ( handle_torch_function, has_torch_function, get_overridable_functions, get_testing_overrides, is_tensor_method_or_property, TorchFunctionMode, _get_current_function_mode, _get_current_function_mode_stack, ) from torch.utils._mode_utils import all_same_mode from torch.utils._pytree import tree_map Tensor = torch.Tensor HANDLED_FUNCTIONS_DIAGONAL = {} HANDLED_FUNCTIONS_SUB = {} HANDLED_FUNCTIONS_SUB_DIAGONAL = {} HANDLED_FUNCTIONS_TENSOR_LIKE = {} WRAPPED_TRIGGERED_IMPLS = {} class TestTorchFunctionOverride(TestCase): from torch.testing._internal.generated.annotated_fn_args import annotated_args from torch.testing._internal.common_utils import gradcheck, gradgradcheck from torch.distributions.utils import broadcast_all
import torch import numpy as np import inspect import functools import pprint import pickle import collections import unittest import contextlib from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_CROSSREF, TEST_WITH_TORCHDYNAMO from torch.overrides import ( handle_torch_function, has_torch_function, get_ignored_functions, get_overridable_functions, get_testing_overrides, resolve_name, is_tensor_method_or_property, TorchFunctionMode, _get_current_function_mode, _get_current_function_mode_stack, BaseTorchFunctionMode ) from torch.utils._mode_utils import all_same_mode from torch.utils._pytree import tree_map Tensor = torch.Tensor HANDLED_FUNCTIONS_DIAGONAL = {} HANDLED_FUNCTIONS_SUB = {} HANDLED_FUNCTIONS_SUB_DIAGONAL = {} HANDLED_FUNCTIONS_TENSOR_LIKE = {} WRAPPED_TRIGGERED_IMPLS = {} class TestTorchFunctionOverride(TestCase): from torch.testing._internal.generated.annotated_fn_args import annotated_args from torch.testing._internal.common_utils import gradcheck, gradgradcheck from torch.distributions.utils import broadcast_all from torch._C import _is_torch_function_all_disabled from torch._C import _len_torch_function_stack from torch.utils._device import DeviceContext
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_overrides.py
generate_tensor_like_override_tests
def generate_tensor_like_override_tests(cls): from torch.testing._internal.generated.annotated_fn_args import annotated_args def test_generator(func, override): # If func corresponds to a torch.Tensor method or property. if is_tensor_method_or_property(func): # Generate an instance by using SubTensor, def instance_gen(): return SubTensor([5]) else: # Otherwise, TensorLike. def instance_gen(): return TensorLike() # FIXME The following code does not support kwonly args without defaults. # The fix is easy, as one just needs to save these args when generating the variable # annotated_args. The problem is that, if one does so, one finds a number # of functions that have problematic signatures in native_functions.yaml. # Fixing these would be BC breaking, so hence this terrible hack # https://github.com/pytorch/pytorch/issues/67008 kwargs = {} if hasattr(func, "__name__") and "linalg_solve_triangular" in func.__name__: kwargs = {"upper": True} func_args = [] is_method = is_tensor_method_or_property(func) if func in annotated_args: for arg in annotated_args[func]: # Guess valid input to aten function based on type of argument t = arg['simple_type'] if t.endswith('?'): t = t[:-1] if t == 'Tensor': if is_method and arg['name'] == 'self': # See "Note: properties and __get__" func = func.__get__(instance_gen()) continue func_args.append(instance_gen()) elif t == 'TensorList' or t == 'ITensorListRef': func_args.append([instance_gen(), instance_gen()]) elif t == 'c10::List<c10::optional<Tensor>>': func_args.append([instance_gen(), instance_gen()]) elif t == 'IntArrayRef' or t == 'SymIntArrayRef': size = arg.get('size', 2) if size == 1: func_args.append(1) else: func_args.append([1] * size) elif t == 'Scalar': func_args.append(3.5) elif t == 'bool': func_args.append(False) elif t == 'Dimname': func_args.append("") elif t == 'DimnameList': func_args.append([""]) elif t.startswith('int'): func_args.append(0) elif t in {'Stream'}: func_args.append(torch.Stream()) elif t.startswith('float') or t == 'double': func_args.append(1.0) elif t in {'Generator', 'MemoryFormat', 'TensorOptions'}: func_args.append(None) elif t == 'ScalarType': func_args.append(torch.float32) elif t == 'c10::string_view': func_args.append('') elif t == 'SymInt': # TODO: generate actual SymbolicInt func_args.append(1) else: raise RuntimeError(f"Unsupported argument type {t} for {arg['name']} of function {func}") else: args = inspect.getfullargspec(override) try: func_args = inspect.getfullargspec(func) # Remove annotations from argspec func_args = type(func_args)(**{**func_args, 'annotations': None}) if func_args != args: raise RuntimeError(f"Override for {func} doesn't match its argspec.\n" + f"Original: {inspect.signature(func)}\n" + f"Override: {inspect.signature(override)}") except TypeError: pass nargs = len(args.args) if args.defaults is not None: nargs -= len(args.defaults) func_args = [instance_gen() for _ in range(nargs)] if args.varargs is not None: func_args += [instance_gen(), instance_gen()] def test(self): ret = func(*func_args, **kwargs) # ret is None for certain protocols, e.g., `__weakref__` and `__setitem__` # This is currently the best check but doesn't work for, for example, # Tensor.__add__ because it redirects to Tensor.add. # See note "_triggered wrapper" if not is_method or ret is None: self.assertTrue(WRAPPED_TRIGGERED_IMPLS[func]._triggered) return self.assertEqual(ret, -1) return test for func, override in get_testing_overrides().items(): test_method = test_generator(func, override) if func.__name__ == "__get__": # Note: properties and __get__ # __get__ is part of the descriptor protocol. # https://docs.python.org/3/howto/descriptor.html # This is used for properties of the form # torch.Tensor.<property>, with the method __get__ # In this case we get the property name in two ways: # This case for properties defined in C. module = getattr( func.__self__, "__qualname__", None ) # This one for properties defined in Python. if module is None: module = "Tensor." + func.__self__.fget.__name__ # Unfortunately I couldn't find a way to unify these two cases # and there is no way for general descriptors. elif is_tensor_method_or_property(func): module = "Tensor" else: module = func.__module__ if module: name = 'test_{}_{}'.format(module.replace('.', '_'), func.__name__) else: name = 'test_{}'.format(func.__name__) test_method.__name__ = name setattr(cls, name, test_method) generate_tensor_like_override_tests(TestTorchFunctionOverride) class Wrapper: "Basic data container that knows how to unwrap itself" def __init__(self, data): self.__dict__["_data"] = data self.__dict__["used_attrs"] = set() self.__dict__["used_calls"] = set() def __getattr__(self, name): if name in self.__dict__: return self.__dict__[name] self.used_attrs.add(name) val = getattr(self._data, name) # If it's a method if not isinstance(val, torch.device) and callable(val): c = getattr(type(self._data), name) # Don't append self to args if classmethod/staticmethod if c is val: return lambda *a, **kw: wrap(self.__torch_function__(c, (Wrapper,), args=a, kwargs=kw)) # Otherwise append self to args return lambda *a, **kw: wrap(self.__torch_function__(c, (Wrapper,), args=(self,) + a, kwargs=kw)) return wrap(val) def __setattr__(self, name, value): if name in self.__dict__: self.__dict__[name] = value self.used_attrs.add(name) setattr(self._data, name, unwrap(value)) def __setitem__(self, key, value): self._data[unwrap(key)] = unwrap(value) def __getitem__(self, key): return wrap(self._data[unwrap(key)]) @classmethod def __torch_function__(cls, func, types, args=(), kwargs=None): if kwargs is None: kwargs = {} # Find an instance of this class in the arguments args_of_this_cls = [] for a in args: if isinstance(a, cls): args_of_this_cls.append(a) elif isinstance(a, collections.abc.Sequence): args_of_this_cls.extend(el for el in a if isinstance(el, cls)) assert len(args_of_this_cls) > 0 for a in args_of_this_cls: a.used_calls.add(func) args = unwrap(tuple(args)) kwargs = {k: unwrap(v) for k, v in kwargs.items()} return wrap(func(*args, **kwargs)) def __add__(self, other): return self.__torch_function__(torch.add, (Wrapper,), (self, other)) def __mul__(self, other): return self.__torch_function__(torch.mul, (Wrapper,), (self, other)) def __sub__(self, other): return self.__torch_function__(torch.sub, (Wrapper,), (self, other)) def __truediv__(self, other): return self.__torch_function__(torch.true_divide, (Wrapper,), (self, other)) def __floordiv__(self, other): return self.__torch_function__(torch.floor_divide, (Wrapper,), (self, other)) def __ge__(self, other): return self.__torch_function__(torch.ge, (Wrapper,), (self, other)) def __gt__(self, other): return self.__torch_function__(torch.gt, (Wrapper,), (self, other)) def __lt__(self, other): return self.__torch_function__(torch.lt, (Wrapper,), (self, other)) def __le__(self, other): return self.__torch_function__(torch.le, (Wrapper,), (self, other)) def __eq__(self, other): return self.__torch_function__(torch.eq, (Wrapper,), (self, other)) def __ne__(self, other): return self.__torch_function__(torch.ne, (Wrapper,), (self, other)) def __bool__(self): return self.__torch_function__(torch.Tensor.__bool__, (Wrapper,), (self,)) def __int__(self): return self.__torch_function__(torch.Tensor.__int__, (Wrapper,), (self,)) def __len__(self): return len(self._data) # unwrap inputs if necessary
def generate_tensor_like_override_tests(cls): from torch.testing._internal.generated.annotated_fn_args import annotated_args def test_generator(func, override): # If func corresponds to a torch.Tensor method or property. if is_tensor_method_or_property(func): # Generate an instance by using SubTensor, def instance_gen(): return SubTensor([5]) else: # Otherwise, TensorLike. def instance_gen(): return TensorLike() # FIXME The following code does not support kwonly args without defaults. # The fix is easy, as one just needs to save these args when generating the variable # annotated_args. The problem is that, if one does so, one finds a number # of functions that have problematic signatures in native_functions.yaml. # Fixing these would be BC breaking, so hence this terrible hack # https://github.com/pytorch/pytorch/issues/67008 kwargs = {} if hasattr(func, "__name__") and "linalg_solve_triangular" in func.__name__: kwargs = {"upper": True} func_args = [] is_method = is_tensor_method_or_property(func) def _simple_type_parser(func, arg_name, arg_type): # Guess valid input to aten function based on type of argument if arg_type == "Tensor": return instance_gen() elif arg_type == "TensorList" or arg_type == "ITensorListRef": return [instance_gen(), instance_gen()] elif arg_type == "c10::List<::std::optional<Tensor>>": return [instance_gen(), instance_gen()] elif arg_type == "IntArrayRef" or arg_type == "SymIntArrayRef": size = arg.get("size", 2) if size == 1: return 1 else: return [1] * size elif arg_type == "Scalar": return 3.5 elif arg_type == "bool": return False elif arg_type == "Dimname": return "" elif arg_type == "DimnameList": return [""] elif arg_type.startswith("int"): return 0 elif arg_type in {"Stream"}: return torch.Stream() elif arg_type.startswith("float") or arg_type == "double": return 1.0 elif arg_type in {"Generator", "MemoryFormat", "TensorOptions"}: return None elif arg_type == "ScalarType": return torch.float32 elif arg_type == "c10::string_view": return "" elif arg_type == "SymInt": # TODO: generate actual SymbolicInt return 1 else: raise RuntimeError( f"Unsupported argument type {arg_type} for {arg_name} of function {func}" ) if func in annotated_args: for arg in annotated_args[func]: # Guess valid input to aten function based on type of argument t = arg["simple_type"] if t.endswith("?"): t = t[:-1] if t == "Tensor" and is_method and arg["name"] == "self": # See "Note: properties and __get__" func = func.__get__(instance_gen()) continue arg_to_add = _simple_type_parser(func, arg["name"], t) if "is_kwarg_only" in arg and arg["is_kwarg_only"] == str(True): kwargs[arg["name"]] = arg_to_add else: func_args.append(arg_to_add) else: args = inspect.getfullargspec(override) try: func_args = inspect.getfullargspec(func) # Remove annotations from argspec func_args = type(func_args)(**{**func_args, 'annotations': None}) if func_args != args: raise RuntimeError(f"Override for {func} doesn't match its argspec.\n" + f"Original: {inspect.signature(func)}\n" + f"Override: {inspect.signature(override)}") except TypeError: pass nargs = len(args.args) if args.defaults is not None: nargs -= len(args.defaults) func_args = [instance_gen() for _ in range(nargs)] if args.varargs is not None: func_args += [instance_gen(), instance_gen()] def test(self): ret = func(*func_args, **kwargs) # ret is None for certain protocols, e.g., `__weakref__` and `__setitem__` # This is currently the best check but doesn't work for, for example, # Tensor.__add__ because it redirects to Tensor.add. # See note "_triggered wrapper" if not is_method or ret is None: self.assertTrue(WRAPPED_TRIGGERED_IMPLS[func]._triggered) return self.assertEqual(ret, -1) return test for func, override in get_testing_overrides().items(): test_method = test_generator(func, override) if func.__name__ == "__get__": # Note: properties and __get__ # __get__ is part of the descriptor protocol. # https://docs.python.org/3/howto/descriptor.html # This is used for properties of the form # torch.Tensor.<property>, with the method __get__ # In this case we get the property name in two ways: # This case for properties defined in C. module = getattr( func.__self__, "__qualname__", None ) # This one for properties defined in Python. if module is None: module = "Tensor." + func.__self__.fget.__name__ # Unfortunately I couldn't find a way to unify these two cases # and there is no way for general descriptors. elif is_tensor_method_or_property(func): module = "Tensor" else: module = func.__module__ if module: name = 'test_{}_{}'.format(module.replace('.', '_'), func.__name__) else: name = f'test_{func.__name__}' test_method.__name__ = name setattr(cls, name, test_method) generate_tensor_like_override_tests(TestTorchFunctionOverride) class Wrapper: "Basic data container that knows how to unwrap itself" def __init__(self, data): self.__dict__["_data"] = data self.__dict__["used_attrs"] = set() self.__dict__["used_calls"] = set() def __getattr__(self, name): if name in self.__dict__: return self.__dict__[name] self.used_attrs.add(name) val = getattr(self._data, name) # If it's a method if not isinstance(val, torch.device) and callable(val): c = getattr(type(self._data), name) # Don't append self to args if classmethod/staticmethod if c is val: return lambda *a, **kw: wrap(self.__torch_function__(c, (Wrapper,), args=a, kwargs=kw)) # Otherwise append self to args return lambda *a, **kw: wrap(self.__torch_function__(c, (Wrapper,), args=(self,) + a, kwargs=kw)) return wrap(val) def __setattr__(self, name, value): if name in self.__dict__: self.__dict__[name] = value self.used_attrs.add(name) setattr(self._data, name, unwrap(value)) def __setitem__(self, key, value): self._data[unwrap(key)] = unwrap(value) def __getitem__(self, key): return wrap(self._data[unwrap(key)]) @classmethod def __torch_function__(cls, func, types, args=(), kwargs=None): if kwargs is None: kwargs = {} # Find an instance of this class in the arguments args_of_this_cls = [] for a in args: if isinstance(a, cls): args_of_this_cls.append(a) elif isinstance(a, collections.abc.Sequence): args_of_this_cls.extend(el for el in a if isinstance(el, cls)) assert len(args_of_this_cls) > 0 for a in args_of_this_cls: a.used_calls.add(func) args = unwrap(tuple(args)) kwargs = {k: unwrap(v) for k, v in kwargs.items()} return wrap(func(*args, **kwargs)) def __add__(self, other): return self.__torch_function__(torch.add, (Wrapper,), (self, other)) def __mul__(self, other): return self.__torch_function__(torch.mul, (Wrapper,), (self, other)) def __sub__(self, other): return self.__torch_function__(torch.sub, (Wrapper,), (self, other)) def __truediv__(self, other): return self.__torch_function__(torch.true_divide, (Wrapper,), (self, other)) def __floordiv__(self, other): return self.__torch_function__(torch.floor_divide, (Wrapper,), (self, other)) def __ge__(self, other): return self.__torch_function__(torch.ge, (Wrapper,), (self, other)) def __gt__(self, other): return self.__torch_function__(torch.gt, (Wrapper,), (self, other)) def __lt__(self, other): return self.__torch_function__(torch.lt, (Wrapper,), (self, other)) def __le__(self, other): return self.__torch_function__(torch.le, (Wrapper,), (self, other)) def __eq__(self, other): return self.__torch_function__(torch.eq, (Wrapper,), (self, other)) def __ne__(self, other): return self.__torch_function__(torch.ne, (Wrapper,), (self, other)) def __bool__(self): return self.__torch_function__(torch.Tensor.__bool__, (Wrapper,), (self,)) def __int__(self): return self.__torch_function__(torch.Tensor.__int__, (Wrapper,), (self,)) def __len__(self): return len(self._data) # unwrap inputs if necessary
import torch import numpy as np import inspect import functools import pprint import pickle import collections import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_CROSSREF from torch.overrides import ( handle_torch_function, has_torch_function, get_overridable_functions, get_testing_overrides, is_tensor_method_or_property, TorchFunctionMode, _get_current_function_mode, _get_current_function_mode_stack, ) from torch.utils._mode_utils import all_same_mode from torch.utils._pytree import tree_map Tensor = torch.Tensor HANDLED_FUNCTIONS_DIAGONAL = {} HANDLED_FUNCTIONS_SUB = {} HANDLED_FUNCTIONS_SUB_DIAGONAL = {} HANDLED_FUNCTIONS_TENSOR_LIKE = {} WRAPPED_TRIGGERED_IMPLS = {} from torch.testing._internal.generated.annotated_fn_args import annotated_args from torch.testing._internal.common_utils import gradcheck, gradgradcheck from torch.distributions.utils import broadcast_all
import torch import numpy as np import inspect import functools import pprint import pickle import collections import unittest import contextlib from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_CROSSREF, TEST_WITH_TORCHDYNAMO from torch.overrides import ( handle_torch_function, has_torch_function, get_ignored_functions, get_overridable_functions, get_testing_overrides, resolve_name, is_tensor_method_or_property, TorchFunctionMode, _get_current_function_mode, _get_current_function_mode_stack, BaseTorchFunctionMode ) from torch.utils._mode_utils import all_same_mode from torch.utils._pytree import tree_map Tensor = torch.Tensor HANDLED_FUNCTIONS_DIAGONAL = {} HANDLED_FUNCTIONS_SUB = {} HANDLED_FUNCTIONS_SUB_DIAGONAL = {} HANDLED_FUNCTIONS_TENSOR_LIKE = {} WRAPPED_TRIGGERED_IMPLS = {} from torch.testing._internal.generated.annotated_fn_args import annotated_args from torch.testing._internal.common_utils import gradcheck, gradgradcheck from torch.distributions.utils import broadcast_all from torch._C import _is_torch_function_all_disabled from torch._C import _len_torch_function_stack from torch.utils._device import DeviceContext
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_overrides.py
test_generator
def test_generator(func, override): # If func corresponds to a torch.Tensor method or property. if is_tensor_method_or_property(func): # Generate an instance by using SubTensor, def instance_gen(): return SubTensor([5]) else: # Otherwise, TensorLike. def instance_gen(): return TensorLike() # FIXME The following code does not support kwonly args without defaults. # The fix is easy, as one just needs to save these args when generating the variable # annotated_args. The problem is that, if one does so, one finds a number # of functions that have problematic signatures in native_functions.yaml. # Fixing these would be BC breaking, so hence this terrible hack # https://github.com/pytorch/pytorch/issues/67008 kwargs = {} if hasattr(func, "__name__") and "linalg_solve_triangular" in func.__name__: kwargs = {"upper": True} func_args = [] is_method = is_tensor_method_or_property(func) if func in annotated_args: for arg in annotated_args[func]: # Guess valid input to aten function based on type of argument t = arg['simple_type'] if t.endswith('?'): t = t[:-1] if t == 'Tensor': if is_method and arg['name'] == 'self': # See "Note: properties and __get__" func = func.__get__(instance_gen()) continue func_args.append(instance_gen()) elif t == 'TensorList' or t == 'ITensorListRef': func_args.append([instance_gen(), instance_gen()]) elif t == 'c10::List<c10::optional<Tensor>>': func_args.append([instance_gen(), instance_gen()]) elif t == 'IntArrayRef' or t == 'SymIntArrayRef': size = arg.get('size', 2) if size == 1: func_args.append(1) else: func_args.append([1] * size) elif t == 'Scalar': func_args.append(3.5) elif t == 'bool': func_args.append(False) elif t == 'Dimname': func_args.append("") elif t == 'DimnameList': func_args.append([""]) elif t.startswith('int'): func_args.append(0) elif t in {'Stream'}: func_args.append(torch.Stream()) elif t.startswith('float') or t == 'double': func_args.append(1.0) elif t in {'Generator', 'MemoryFormat', 'TensorOptions'}: func_args.append(None) elif t == 'ScalarType': func_args.append(torch.float32) elif t == 'c10::string_view': func_args.append('') elif t == 'SymInt': # TODO: generate actual SymbolicInt func_args.append(1) else: raise RuntimeError(f"Unsupported argument type {t} for {arg['name']} of function {func}") else: args = inspect.getfullargspec(override) try: func_args = inspect.getfullargspec(func) # Remove annotations from argspec func_args = type(func_args)(**{**func_args, 'annotations': None}) if func_args != args: raise RuntimeError(f"Override for {func} doesn't match its argspec.\n" + f"Original: {inspect.signature(func)}\n" + f"Override: {inspect.signature(override)}") except TypeError: pass nargs = len(args.args) if args.defaults is not None: nargs -= len(args.defaults) func_args = [instance_gen() for _ in range(nargs)] if args.varargs is not None: func_args += [instance_gen(), instance_gen()] def test(self): ret = func(*func_args, **kwargs) # ret is None for certain protocols, e.g., `__weakref__` and `__setitem__` # This is currently the best check but doesn't work for, for example, # Tensor.__add__ because it redirects to Tensor.add. # See note "_triggered wrapper" if not is_method or ret is None: self.assertTrue(WRAPPED_TRIGGERED_IMPLS[func]._triggered) return self.assertEqual(ret, -1) return test for func, override in get_testing_overrides().items(): test_method = test_generator(func, override) if func.__name__ == "__get__": # Note: properties and __get__ # __get__ is part of the descriptor protocol. # https://docs.python.org/3/howto/descriptor.html # This is used for properties of the form # torch.Tensor.<property>, with the method __get__ # In this case we get the property name in two ways: # This case for properties defined in C. module = getattr( func.__self__, "__qualname__", None ) # This one for properties defined in Python. if module is None: module = "Tensor." + func.__self__.fget.__name__ # Unfortunately I couldn't find a way to unify these two cases # and there is no way for general descriptors. elif is_tensor_method_or_property(func): module = "Tensor" else: module = func.__module__ if module: name = 'test_{}_{}'.format(module.replace('.', '_'), func.__name__) else: name = 'test_{}'.format(func.__name__) test_method.__name__ = name setattr(cls, name, test_method)
def test_generator(func, override): # If func corresponds to a torch.Tensor method or property. if is_tensor_method_or_property(func): # Generate an instance by using SubTensor, def instance_gen(): return SubTensor([5]) else: # Otherwise, TensorLike. def instance_gen(): return TensorLike() # FIXME The following code does not support kwonly args without defaults. # The fix is easy, as one just needs to save these args when generating the variable # annotated_args. The problem is that, if one does so, one finds a number # of functions that have problematic signatures in native_functions.yaml. # Fixing these would be BC breaking, so hence this terrible hack # https://github.com/pytorch/pytorch/issues/67008 kwargs = {} if hasattr(func, "__name__") and "linalg_solve_triangular" in func.__name__: kwargs = {"upper": True} func_args = [] is_method = is_tensor_method_or_property(func) def _simple_type_parser(func, arg_name, arg_type): # Guess valid input to aten function based on type of argument if arg_type == "Tensor": return instance_gen() elif arg_type == "TensorList" or arg_type == "ITensorListRef": return [instance_gen(), instance_gen()] elif arg_type == "c10::List<::std::optional<Tensor>>": return [instance_gen(), instance_gen()] elif arg_type == "IntArrayRef" or arg_type == "SymIntArrayRef": size = arg.get("size", 2) if size == 1: return 1 else: return [1] * size elif arg_type == "Scalar": return 3.5 elif arg_type == "bool": return False elif arg_type == "Dimname": return "" elif arg_type == "DimnameList": return [""] elif arg_type.startswith("int"): return 0 elif arg_type in {"Stream"}: return torch.Stream() elif arg_type.startswith("float") or arg_type == "double": return 1.0 elif arg_type in {"Generator", "MemoryFormat", "TensorOptions"}: return None elif arg_type == "ScalarType": return torch.float32 elif arg_type == "c10::string_view": return "" elif arg_type == "SymInt": # TODO: generate actual SymbolicInt return 1 else: raise RuntimeError( f"Unsupported argument type {arg_type} for {arg_name} of function {func}" ) if func in annotated_args: for arg in annotated_args[func]: # Guess valid input to aten function based on type of argument t = arg["simple_type"] if t.endswith("?"): t = t[:-1] if t == "Tensor" and is_method and arg["name"] == "self": # See "Note: properties and __get__" func = func.__get__(instance_gen()) continue arg_to_add = _simple_type_parser(func, arg["name"], t) if "is_kwarg_only" in arg and arg["is_kwarg_only"] == str(True): kwargs[arg["name"]] = arg_to_add else: func_args.append(arg_to_add) else: args = inspect.getfullargspec(override) try: func_args = inspect.getfullargspec(func) # Remove annotations from argspec func_args = type(func_args)(**{**func_args, 'annotations': None}) if func_args != args: raise RuntimeError(f"Override for {func} doesn't match its argspec.\n" + f"Original: {inspect.signature(func)}\n" + f"Override: {inspect.signature(override)}") except TypeError: pass nargs = len(args.args) if args.defaults is not None: nargs -= len(args.defaults) func_args = [instance_gen() for _ in range(nargs)] if args.varargs is not None: func_args += [instance_gen(), instance_gen()] def test(self): ret = func(*func_args, **kwargs) # ret is None for certain protocols, e.g., `__weakref__` and `__setitem__` # This is currently the best check but doesn't work for, for example, # Tensor.__add__ because it redirects to Tensor.add. # See note "_triggered wrapper" if not is_method or ret is None: self.assertTrue(WRAPPED_TRIGGERED_IMPLS[func]._triggered) return self.assertEqual(ret, -1) return test for func, override in get_testing_overrides().items(): test_method = test_generator(func, override) if func.__name__ == "__get__": # Note: properties and __get__ # __get__ is part of the descriptor protocol. # https://docs.python.org/3/howto/descriptor.html # This is used for properties of the form # torch.Tensor.<property>, with the method __get__ # In this case we get the property name in two ways: # This case for properties defined in C. module = getattr( func.__self__, "__qualname__", None ) # This one for properties defined in Python. if module is None: module = "Tensor." + func.__self__.fget.__name__ # Unfortunately I couldn't find a way to unify these two cases # and there is no way for general descriptors. elif is_tensor_method_or_property(func): module = "Tensor" else: module = func.__module__ if module: name = 'test_{}_{}'.format(module.replace('.', '_'), func.__name__) else: name = f'test_{func.__name__}' test_method.__name__ = name setattr(cls, name, test_method)
import torch import numpy as np import inspect import functools import pprint import pickle import collections import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_CROSSREF from torch.overrides import ( handle_torch_function, has_torch_function, get_overridable_functions, get_testing_overrides, is_tensor_method_or_property, TorchFunctionMode, _get_current_function_mode, _get_current_function_mode_stack, ) from torch.utils._mode_utils import all_same_mode from torch.utils._pytree import tree_map Tensor = torch.Tensor HANDLED_FUNCTIONS_DIAGONAL = {} HANDLED_FUNCTIONS_SUB = {} HANDLED_FUNCTIONS_SUB_DIAGONAL = {} HANDLED_FUNCTIONS_TENSOR_LIKE = {} WRAPPED_TRIGGERED_IMPLS = {} from torch.testing._internal.generated.annotated_fn_args import annotated_args from torch.testing._internal.common_utils import gradcheck, gradgradcheck from torch.distributions.utils import broadcast_all
import torch import numpy as np import inspect import functools import pprint import pickle import collections import unittest import contextlib from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_CROSSREF, TEST_WITH_TORCHDYNAMO from torch.overrides import ( handle_torch_function, has_torch_function, get_ignored_functions, get_overridable_functions, get_testing_overrides, resolve_name, is_tensor_method_or_property, TorchFunctionMode, _get_current_function_mode, _get_current_function_mode_stack, BaseTorchFunctionMode ) from torch.utils._mode_utils import all_same_mode from torch.utils._pytree import tree_map Tensor = torch.Tensor HANDLED_FUNCTIONS_DIAGONAL = {} HANDLED_FUNCTIONS_SUB = {} HANDLED_FUNCTIONS_SUB_DIAGONAL = {} HANDLED_FUNCTIONS_TENSOR_LIKE = {} WRAPPED_TRIGGERED_IMPLS = {} from torch.testing._internal.generated.annotated_fn_args import annotated_args from torch.testing._internal.common_utils import gradcheck, gradgradcheck from torch.distributions.utils import broadcast_all from torch._C import _is_torch_function_all_disabled from torch._C import _len_torch_function_stack from torch.utils._device import DeviceContext
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_overrides.py
test_warn_on_invalid_torch_function
def test_warn_on_invalid_torch_function(self): class Bad1(): def __torch_function__(self, *args, **kwargs): pass class Bad2(torch.Tensor): def __torch_function__(self, *args, **kwargs): pass a = Bad1() for a in (Bad1(), Bad2()): with self.assertWarnsRegex(DeprecationWarning, "as a plain method is deprecated"): # Function that handles torch_function on the python side torch.nn.functional.dropout(a) with self.assertWarnsRegex(UserWarning, "as a plain method is deprecated"): # Function that handles torch_function in C++ torch.abs(a)
import torch import numpy as np import inspect import functools import pprint import pickle import collections import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_CROSSREF from torch.overrides import ( handle_torch_function, has_torch_function, get_overridable_functions, get_testing_overrides, is_tensor_method_or_property, TorchFunctionMode, _get_current_function_mode, _get_current_function_mode_stack, ) from torch.utils._mode_utils import all_same_mode from torch.utils._pytree import tree_map Tensor = torch.Tensor HANDLED_FUNCTIONS_DIAGONAL = {} HANDLED_FUNCTIONS_SUB = {} HANDLED_FUNCTIONS_SUB_DIAGONAL = {} HANDLED_FUNCTIONS_TENSOR_LIKE = {} WRAPPED_TRIGGERED_IMPLS = {} from torch.testing._internal.generated.annotated_fn_args import annotated_args from torch.testing._internal.common_utils import gradcheck, gradgradcheck from torch.distributions.utils import broadcast_all class TestTorchFunctionWarning(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_overrides.py
test_warn_on_invalid_torch_function_standalone_class
def test_warn_on_invalid_torch_function_standalone_class(self): class StandaloneTorchFunctionClass: def __torch_function__(self, *args, **kwargs): pass a = StandaloneTorchFunctionClass() with self.assertWarnsRegex(DeprecationWarning, "as a plain method is deprecated"): # Function that handles torch_function on the python side torch.nn.functional.dropout(a) with self.assertWarnsRegex(UserWarning, "as a plain method is deprecated"): # Function that handles torch_function in C++ torch.abs(a)
import torch import numpy as np import inspect import functools import pprint import pickle import collections import unittest import contextlib from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_CROSSREF, TEST_WITH_TORCHDYNAMO from torch.overrides import ( handle_torch_function, has_torch_function, get_ignored_functions, get_overridable_functions, get_testing_overrides, resolve_name, is_tensor_method_or_property, TorchFunctionMode, _get_current_function_mode, _get_current_function_mode_stack, BaseTorchFunctionMode ) from torch.utils._mode_utils import all_same_mode from torch.utils._pytree import tree_map Tensor = torch.Tensor HANDLED_FUNCTIONS_DIAGONAL = {} HANDLED_FUNCTIONS_SUB = {} HANDLED_FUNCTIONS_SUB_DIAGONAL = {} HANDLED_FUNCTIONS_TENSOR_LIKE = {} WRAPPED_TRIGGERED_IMPLS = {} from torch.testing._internal.generated.annotated_fn_args import annotated_args from torch.testing._internal.common_utils import gradcheck, gradgradcheck from torch.distributions.utils import broadcast_all class TestTorchFunctionWarning(TestCase): from torch._C import _is_torch_function_all_disabled from torch._C import _len_torch_function_stack from torch.utils._device import DeviceContext
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_overrides.py
test_warn_on_invalid_torch_function_tensor_subclass
def test_warn_on_invalid_torch_function_tensor_subclass(self): class TensorSubclassTorchFunctionClass(torch.Tensor): def __torch_function__(self, *args, **kwargs): pass b = TensorSubclassTorchFunctionClass() with self.assertWarnsRegex(DeprecationWarning, "as a plain method is deprecated"): # Function that handles torch_function on the python side torch.nn.functional.dropout(b) with self.assertWarnsRegex(UserWarning, "as a plain method is deprecated"): # Function that handles torch_function in C++ torch.abs(b)
import torch import numpy as np import inspect import functools import pprint import pickle import collections import unittest import contextlib from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_CROSSREF, TEST_WITH_TORCHDYNAMO from torch.overrides import ( handle_torch_function, has_torch_function, get_ignored_functions, get_overridable_functions, get_testing_overrides, resolve_name, is_tensor_method_or_property, TorchFunctionMode, _get_current_function_mode, _get_current_function_mode_stack, BaseTorchFunctionMode ) from torch.utils._mode_utils import all_same_mode from torch.utils._pytree import tree_map Tensor = torch.Tensor HANDLED_FUNCTIONS_DIAGONAL = {} HANDLED_FUNCTIONS_SUB = {} HANDLED_FUNCTIONS_SUB_DIAGONAL = {} HANDLED_FUNCTIONS_TENSOR_LIKE = {} WRAPPED_TRIGGERED_IMPLS = {} from torch.testing._internal.generated.annotated_fn_args import annotated_args from torch.testing._internal.common_utils import gradcheck, gradgradcheck from torch.distributions.utils import broadcast_all class TestTorchFunctionWarning(TestCase): from torch._C import _is_torch_function_all_disabled from torch._C import _len_torch_function_stack from torch.utils._device import DeviceContext
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_overrides.py
test_getitem_call
def test_getitem_call(self): # This failed because the parser thinks the function is called to() # but it's actually called _parse_to() called = False class A(TorchFunctionMode): def __torch_function__(self, func, types, args=(), kwargs=None): nonlocal called if kwargs is None: kwargs = {} called = True return func(*args, **kwargs) a = torch.zeros(5) b = torch.tensor(0) with A(): a[b] self.assertTrue(called)
import torch import numpy as np import inspect import functools import pprint import pickle import collections import unittest import contextlib from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_CROSSREF, TEST_WITH_TORCHDYNAMO from torch.overrides import ( handle_torch_function, has_torch_function, get_ignored_functions, get_overridable_functions, get_testing_overrides, resolve_name, is_tensor_method_or_property, TorchFunctionMode, _get_current_function_mode, _get_current_function_mode_stack, BaseTorchFunctionMode ) from torch.utils._mode_utils import all_same_mode from torch.utils._pytree import tree_map Tensor = torch.Tensor HANDLED_FUNCTIONS_DIAGONAL = {} HANDLED_FUNCTIONS_SUB = {} HANDLED_FUNCTIONS_SUB_DIAGONAL = {} HANDLED_FUNCTIONS_TENSOR_LIKE = {} WRAPPED_TRIGGERED_IMPLS = {} from torch.testing._internal.generated.annotated_fn_args import annotated_args from torch.testing._internal.common_utils import gradcheck, gradgradcheck from torch.distributions.utils import broadcast_all @unittest.skipIf(TEST_WITH_CROSSREF, "not run with crossref") class TestTorchFunctionMode(TestCase): from torch._C import _is_torch_function_all_disabled from torch._C import _len_torch_function_stack from torch.utils._device import DeviceContext
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_overrides.py
test_torch_function_all_disabled_api
def test_torch_function_all_disabled_api(self): from torch._C import _is_torch_function_all_disabled state = _is_torch_function_all_disabled() self.assertFalse(state) with torch._C.DisableTorchFunction(): state = _is_torch_function_all_disabled() self.assertTrue(state) state = _is_torch_function_all_disabled() self.assertFalse(state) with torch._C.DisableTorchFunctionSubclass(): state = _is_torch_function_all_disabled() self.assertFalse(state)
import torch import numpy as np import inspect import functools import pprint import pickle import collections import unittest import contextlib from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_CROSSREF, TEST_WITH_TORCHDYNAMO from torch.overrides import ( handle_torch_function, has_torch_function, get_ignored_functions, get_overridable_functions, get_testing_overrides, resolve_name, is_tensor_method_or_property, TorchFunctionMode, _get_current_function_mode, _get_current_function_mode_stack, BaseTorchFunctionMode ) from torch.utils._mode_utils import all_same_mode from torch.utils._pytree import tree_map Tensor = torch.Tensor HANDLED_FUNCTIONS_DIAGONAL = {} HANDLED_FUNCTIONS_SUB = {} HANDLED_FUNCTIONS_SUB_DIAGONAL = {} HANDLED_FUNCTIONS_TENSOR_LIKE = {} WRAPPED_TRIGGERED_IMPLS = {} from torch.testing._internal.generated.annotated_fn_args import annotated_args from torch.testing._internal.common_utils import gradcheck, gradgradcheck from torch.distributions.utils import broadcast_all @unittest.skipIf(TEST_WITH_CROSSREF, "not run with crossref") class TestTorchFunctionMode(TestCase): from torch._C import _is_torch_function_all_disabled from torch._C import _len_torch_function_stack from torch.utils._device import DeviceContext
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_overrides.py
test_with_mode
def test_with_mode(self): class ErrorA(RuntimeError): pass class A(TorchFunctionMode): def __torch_function__(self, *args, **kwargs): raise ErrorA() with self.assertRaises(ErrorA): with A(): torch.empty([])
def test_with_mode(self): class ErrorA(RuntimeError): pass class A(TorchFunctionMode): def __torch_function__(self, *args, **kwargs): raise ErrorA with self.assertRaises(ErrorA): with A(): torch.empty([])
import torch import numpy as np import inspect import functools import pprint import pickle import collections import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_CROSSREF from torch.overrides import ( handle_torch_function, has_torch_function, get_overridable_functions, get_testing_overrides, is_tensor_method_or_property, TorchFunctionMode, _get_current_function_mode, _get_current_function_mode_stack, ) from torch.utils._mode_utils import all_same_mode from torch.utils._pytree import tree_map Tensor = torch.Tensor HANDLED_FUNCTIONS_DIAGONAL = {} HANDLED_FUNCTIONS_SUB = {} HANDLED_FUNCTIONS_SUB_DIAGONAL = {} HANDLED_FUNCTIONS_TENSOR_LIKE = {} WRAPPED_TRIGGERED_IMPLS = {} from torch.testing._internal.generated.annotated_fn_args import annotated_args from torch.testing._internal.common_utils import gradcheck, gradgradcheck from torch.distributions.utils import broadcast_all @unittest.skipIf(TEST_WITH_CROSSREF, "not run with crossref") class TestTorchFunctionMode(TestCase):
import torch import numpy as np import inspect import functools import pprint import pickle import collections import unittest import contextlib from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_CROSSREF, TEST_WITH_TORCHDYNAMO from torch.overrides import ( handle_torch_function, has_torch_function, get_ignored_functions, get_overridable_functions, get_testing_overrides, resolve_name, is_tensor_method_or_property, TorchFunctionMode, _get_current_function_mode, _get_current_function_mode_stack, BaseTorchFunctionMode ) from torch.utils._mode_utils import all_same_mode from torch.utils._pytree import tree_map Tensor = torch.Tensor HANDLED_FUNCTIONS_DIAGONAL = {} HANDLED_FUNCTIONS_SUB = {} HANDLED_FUNCTIONS_SUB_DIAGONAL = {} HANDLED_FUNCTIONS_TENSOR_LIKE = {} WRAPPED_TRIGGERED_IMPLS = {} from torch.testing._internal.generated.annotated_fn_args import annotated_args from torch.testing._internal.common_utils import gradcheck, gradgradcheck from torch.distributions.utils import broadcast_all @unittest.skipIf(TEST_WITH_CROSSREF, "not run with crossref") class TestTorchFunctionMode(TestCase): from torch._C import _is_torch_function_all_disabled from torch._C import _len_torch_function_stack from torch.utils._device import DeviceContext
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_overrides.py
test_with_mode_created_separately
def test_with_mode_created_separately(self): class ErrorA(RuntimeError): pass class A(TorchFunctionMode): def __torch_function__(self, *args, **kwargs): raise ErrorA() x = A() with self.assertRaises(ErrorA): with x: torch.empty([])
def test_with_mode_created_separately(self): class ErrorA(RuntimeError): pass class A(TorchFunctionMode): def __torch_function__(self, *args, **kwargs): raise ErrorA x = A() with self.assertRaises(ErrorA): with x: torch.empty([])
import torch import numpy as np import inspect import functools import pprint import pickle import collections import unittest from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_CROSSREF from torch.overrides import ( handle_torch_function, has_torch_function, get_overridable_functions, get_testing_overrides, is_tensor_method_or_property, TorchFunctionMode, _get_current_function_mode, _get_current_function_mode_stack, ) from torch.utils._mode_utils import all_same_mode from torch.utils._pytree import tree_map Tensor = torch.Tensor HANDLED_FUNCTIONS_DIAGONAL = {} HANDLED_FUNCTIONS_SUB = {} HANDLED_FUNCTIONS_SUB_DIAGONAL = {} HANDLED_FUNCTIONS_TENSOR_LIKE = {} WRAPPED_TRIGGERED_IMPLS = {} from torch.testing._internal.generated.annotated_fn_args import annotated_args from torch.testing._internal.common_utils import gradcheck, gradgradcheck from torch.distributions.utils import broadcast_all @unittest.skipIf(TEST_WITH_CROSSREF, "not run with crossref") class TestTorchFunctionMode(TestCase):
import torch import numpy as np import inspect import functools import pprint import pickle import collections import unittest import contextlib from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_CROSSREF, TEST_WITH_TORCHDYNAMO from torch.overrides import ( handle_torch_function, has_torch_function, get_ignored_functions, get_overridable_functions, get_testing_overrides, resolve_name, is_tensor_method_or_property, TorchFunctionMode, _get_current_function_mode, _get_current_function_mode_stack, BaseTorchFunctionMode ) from torch.utils._mode_utils import all_same_mode from torch.utils._pytree import tree_map Tensor = torch.Tensor HANDLED_FUNCTIONS_DIAGONAL = {} HANDLED_FUNCTIONS_SUB = {} HANDLED_FUNCTIONS_SUB_DIAGONAL = {} HANDLED_FUNCTIONS_TENSOR_LIKE = {} WRAPPED_TRIGGERED_IMPLS = {} from torch.testing._internal.generated.annotated_fn_args import annotated_args from torch.testing._internal.common_utils import gradcheck, gradgradcheck from torch.distributions.utils import broadcast_all @unittest.skipIf(TEST_WITH_CROSSREF, "not run with crossref") class TestTorchFunctionMode(TestCase): from torch._C import _is_torch_function_all_disabled from torch._C import _len_torch_function_stack from torch.utils._device import DeviceContext
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_prims.py
_wrapper
def _wrapper(a, b, broadcast_dimensions): return prims.broadcast_in_dim(a, b.shape, broadcast_dimensions) traced = make_traced(_wrapper) make_arg = partial(make_tensor, device=device, dtype=dtype) for executor in ('aten', 'strictly_nvfuser'): fn = partial(traced, executor=executor) # Same shape shape = (5, 5) a = make_arg(shape) b = make_arg(shape, low=0.0, high=0.0) result = fn(a, b, (0, 1)) self.assertEqual(result.shape, a.shape) self.assertTrue(result.is_contiguous) self.assertEqual(a, result) # Error input: reordering dims with self.assertRaises(Exception): result = fn(a, b, (1, 0)) # Adding outermost dimensions a = make_arg((5, 5)) b = make_arg((3, 3, 5, 5), low=0.0, high=0.0) result = fn(a, b, (2, 3)) self.assertEqual(result.shape, b.shape) self.assertEqual(a.broadcast_to(b.shape), result) # Expands a = make_arg((1, 5, 1)) b = make_arg((3, 5, 7), low=0.0, high=0.0) result = fn(a, b, (0, 1, 2)) self.assertEqual(result.shape, b.shape) self.assertEqual(a.expand_as(result), result) # Unsqueezes a = make_arg((1, 2, 3)) b = make_arg((1, 2, 1, 3), low=0.0, high=0.0) result = fn(a, b, (0, 1, 3)) self.assertEqual(result.shape, b.shape) self.assertEqual(a.unsqueeze(2), result) # FIXME: This test exposes an issue in nvfuser # Adds outermost, expands, and unsqueezes """ a = make_arg((1, 2, 3)) b = make_arg((4, 1, 7, 2, 3, 3), low=0.0, high=0.0) result = fn(a, b, (1, 3, 4)) self.assertEqual(result.shape, b.shape) a.unsqueeze_(3) a.unsqueeze_(1) a.unsqueeze_(0) self.assertEqual(a.expand_as(result), result) """
def _wrapper(a, b, broadcast_dimensions): return prims.broadcast_in_dim(a, b.shape, broadcast_dimensions) traced = make_traced(_wrapper) make_arg = partial(make_tensor, device=device, dtype=dtype) for executor in ('aten',): fn = partial(traced, executor=executor) # Same shape shape = (5, 5) a = make_arg(shape) b = make_arg(shape, low=0.0, high=0.0) result = fn(a, b, (0, 1)) self.assertEqual(result.shape, a.shape) self.assertTrue(result.is_contiguous) self.assertEqual(a, result) # Error input: reordering dims with self.assertRaises(Exception): result = fn(a, b, (1, 0)) # Adding outermost dimensions a = make_arg((5, 5)) b = make_arg((3, 3, 5, 5), low=0.0, high=0.0) result = fn(a, b, (2, 3)) self.assertEqual(result.shape, b.shape) self.assertEqual(a.broadcast_to(b.shape), result) # Expands a = make_arg((1, 5, 1)) b = make_arg((3, 5, 7), low=0.0, high=0.0) result = fn(a, b, (0, 1, 2)) self.assertEqual(result.shape, b.shape) self.assertEqual(a.expand_as(result), result) # Unsqueezes a = make_arg((1, 2, 3)) b = make_arg((1, 2, 1, 3), low=0.0, high=0.0) result = fn(a, b, (0, 1, 3)) self.assertEqual(result.shape, b.shape) self.assertEqual(a.unsqueeze(2), result)
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
from functools import partial from itertools import product import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims_common import CUDARngStateHelper from torch._prims.executor import make_traced import torch._refs as refs import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_prims.py
test_nvfuser_impl_is_used
def test_nvfuser_impl_is_used(self, device): # This test is to ensure that when the nvfuser implementation exists it is used # Assuming one-to-one mapping between prims and nvfuser implementations # This test is not intended to test the correctness of the nvfuser implementation from nvfuser._C import FusionDefinition as fd prim_nvfuser_ops = set(torch._prims.__all__).intersection(dir(fd.ops)) ops_without_nvfuser_impl = { name for name in prim_nvfuser_ops if getattr(torch.ops.nvprims, name, None) is None } assert ( len(ops_without_nvfuser_impl) == 0 ), (f"The following prims do not have 'impl_nvfuser' defined: {ops_without_nvfuser_impl} ", "while there exists nvfuser implementations for them.")
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" class TestPrims(TestCase): from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_prims.py
test_skip_ops_nvfuser_prims_mode
def test_skip_ops_nvfuser_prims_mode(self, device): # This test verifies that the NvfuserPrimsMode skips the specified # functions. Skipping a function means that it's not converted into # nvprims counterparts. from torch._prims.context import NvfuserPrimsMode a = make_tensor(5, 5, device=device, dtype=torch.float32) def func(a): return torch.ops.prims.sin.default(a) skip_ops = {"prims.sin.default", } with NvfuserPrimsMode(skip_ops=skip_ops): gm = make_fx(func)(a) includes_any_prims_sin = any( node.target == torch.ops.prims.sin.default for node in gm.graph.nodes ) self.assertTrue(includes_any_prims_sin) include_any_nvprims_sin = any( node.target == torch.ops.nvprims.sin.default for node in gm.graph.nodes ) self.assertFalse(include_any_nvprims_sin)
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" class TestPrims(TestCase): from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_prims.py
func
def func(a): return torch.ops.prims.sin.default(a) skip_ops = {"prims.sin.default", } with NvfuserPrimsMode(skip_ops=skip_ops): gm = make_fx(func)(a) includes_any_prims_sin = any( node.target == torch.ops.prims.sin.default for node in gm.graph.nodes ) self.assertTrue(includes_any_prims_sin) include_any_nvprims_sin = any( node.target == torch.ops.nvprims.sin.default for node in gm.graph.nodes ) self.assertFalse(include_any_nvprims_sin)
def func(a): return torch.ops.aten.sigmoid.default(torch.ops.aten.digamma.default(a)) with TorchRefsMode(): gm = make_fx(func)(a) # Check that all call_function nodes are prims call_function_nodes = list(filter(lambda n: n.op == "call_function", gm.graph.nodes)) all_prims_namespace = all( node.target.name().startswith("prims") for node in call_function_nodes ) self.assertTrue(all_prims_namespace)
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
from functools import partial from itertools import product import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims_common import CUDARngStateHelper from torch._prims.executor import make_traced import torch._refs as refs import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_prims.py
test_skip_ops_nvfuser_capability_mode
def test_skip_ops_nvfuser_capability_mode(self, device): # This test verifies that the NvfuserCapabilityMode skips the specified # functions. Skipping a function means that specific # reference/decomposition is not traced and there's no attempt to lower # it to nvprims. from torch._prims.context import TorchRefsNvfuserCapabilityMode a = make_tensor(5, 5, device=device, dtype=torch.float32) def func(a): return torch.sin(a) skip_ops = {"torch.sin", } with TorchRefsNvfuserCapabilityMode(skip_ops=skip_ops): gm = make_fx(func)(a) includes_any_aten_sin = any( node.target == torch.ops.aten.sin.default for node in gm.graph.nodes ) self.assertTrue(includes_any_aten_sin) include_any_nvprims_sin = any( node.target == torch.ops.nvprims.sin.default for node in gm.graph.nodes ) self.assertFalse(include_any_nvprims_sin)
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" class TestPrims(TestCase): from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_prims.py
func
def func(a): return torch.ops.prims.sin.default(a) skip_ops = {"prims.sin.default", } with NvfuserPrimsMode(skip_ops=skip_ops): gm = make_fx(func)(a) includes_any_prims_sin = any( node.target == torch.ops.prims.sin.default for node in gm.graph.nodes ) self.assertTrue(includes_any_prims_sin) include_any_nvprims_sin = any( node.target == torch.ops.nvprims.sin.default for node in gm.graph.nodes ) self.assertFalse(include_any_nvprims_sin)
def func(a): return torch.ops.aten.sigmoid.default(torch.ops.aten.digamma.default(a)) with TorchRefsMode(): gm = make_fx(func)(a) # Check that all call_function nodes are prims call_function_nodes = list(filter(lambda n: n.op == "call_function", gm.graph.nodes)) all_prims_namespace = all( node.target.name().startswith("prims") for node in call_function_nodes ) self.assertTrue(all_prims_namespace)
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
from functools import partial from itertools import product import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims_common import CUDARngStateHelper from torch._prims.executor import make_traced import torch._refs as refs import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_prims.py
func
def func(a): return torch.ops.prims.sin.default(a) skip_ops = {"prims.sin.default", } with NvfuserPrimsMode(skip_ops=skip_ops): gm = make_fx(func)(a) includes_any_prims_sin = any( node.target == torch.ops.prims.sin.default for node in gm.graph.nodes ) self.assertTrue(includes_any_prims_sin) include_any_nvprims_sin = any( node.target == torch.ops.nvprims.sin.default for node in gm.graph.nodes ) self.assertFalse(include_any_nvprims_sin)
def func(a): return torch.ops.aten.sigmoid.default(torch.ops.aten.digamma.default(a)) with TorchRefsMode(): gm = make_fx(func)(a) # Check that all call_function nodes are prims call_function_nodes = list(filter(lambda n: n.op == "call_function", gm.graph.nodes)) all_prims_namespace = all( node.target.name().startswith("prims") for node in call_function_nodes ) self.assertTrue(all_prims_namespace)
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
from functools import partial from itertools import product import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims_common import CUDARngStateHelper from torch._prims.executor import make_traced import torch._refs as refs import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_overrides.py
test_custom_device_type
def test_custom_device_type(self): class CustomDeviceContext(TorchFunctionMode): def __torch_function__(self, func, types, args=(), kwargs=None): kwargs = kwargs or {} if func == torch.device: if args and isinstance(args[0], int): args = ("xla", args[0]) elif isinstance(kwargs.get('device'), int): kwargs['device'] = f"xla:{kwargs.get('device')}" return func(*args, **kwargs) with CustomDeviceContext(): d_args = torch.device(0) self.assertEqual(d_args.type, "xla") self.assertEqual(d_args.index, 0) d_kwargs = torch.device(device=0) self.assertEqual(d_kwargs.type, "xla") self.assertEqual(d_kwargs.index, 0)
import torch import numpy as np import inspect import functools import pprint import pickle import collections import unittest import contextlib from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_CROSSREF, TEST_WITH_TORCHDYNAMO from torch.overrides import ( handle_torch_function, has_torch_function, get_ignored_functions, get_overridable_functions, get_testing_overrides, resolve_name, is_tensor_method_or_property, TorchFunctionMode, _get_current_function_mode, _get_current_function_mode_stack, BaseTorchFunctionMode ) from torch.utils._mode_utils import all_same_mode from torch.utils._pytree import tree_map Tensor = torch.Tensor HANDLED_FUNCTIONS_DIAGONAL = {} HANDLED_FUNCTIONS_SUB = {} HANDLED_FUNCTIONS_SUB_DIAGONAL = {} HANDLED_FUNCTIONS_TENSOR_LIKE = {} WRAPPED_TRIGGERED_IMPLS = {} from torch.testing._internal.generated.annotated_fn_args import annotated_args from torch.testing._internal.common_utils import gradcheck, gradgradcheck from torch.distributions.utils import broadcast_all @unittest.skipIf(TEST_WITH_CROSSREF, "not run with crossref") class TestTorchFunctionMode(TestCase): from torch._C import _is_torch_function_all_disabled from torch._C import _len_torch_function_stack from torch.utils._device import DeviceContext
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_overrides.py
test_device_context_semantics
def test_device_context_semantics(self): from torch._C import _len_torch_function_stack from torch.utils._device import DeviceContext try: torch.set_default_device("cuda") def get_stack(): return [torch._C._get_function_stack_at(i) for i in range(_len_torch_function_stack())] base_mode = BaseTorchFunctionMode() with base_mode: torch.set_default_device("cpu") x = torch.ones(2, 2) stack = get_stack() self.assertIsInstance(stack[0], DeviceContext) self.assertEqual(stack[0].device, torch.device("cpu")) stack = get_stack() self.assertIsInstance(stack[0], DeviceContext) self.assertEqual(stack[0].device, torch.device("cpu")) finally: torch.set_default_device(None)
import torch import numpy as np import inspect import functools import pprint import pickle import collections import unittest import contextlib from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_CROSSREF, TEST_WITH_TORCHDYNAMO from torch.overrides import ( handle_torch_function, has_torch_function, get_ignored_functions, get_overridable_functions, get_testing_overrides, resolve_name, is_tensor_method_or_property, TorchFunctionMode, _get_current_function_mode, _get_current_function_mode_stack, BaseTorchFunctionMode ) from torch.utils._mode_utils import all_same_mode from torch.utils._pytree import tree_map Tensor = torch.Tensor HANDLED_FUNCTIONS_DIAGONAL = {} HANDLED_FUNCTIONS_SUB = {} HANDLED_FUNCTIONS_SUB_DIAGONAL = {} HANDLED_FUNCTIONS_TENSOR_LIKE = {} WRAPPED_TRIGGERED_IMPLS = {} from torch.testing._internal.generated.annotated_fn_args import annotated_args from torch.testing._internal.common_utils import gradcheck, gradgradcheck from torch.distributions.utils import broadcast_all @unittest.skipIf(TEST_WITH_CROSSREF, "not run with crossref") class TestTorchFunctionMode(TestCase): from torch._C import _is_torch_function_all_disabled from torch._C import _len_torch_function_stack from torch.utils._device import DeviceContext
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_overrides.py
get_stack
if __name__ == '__main__': run_tests()
def get_stack(): return [torch._C._get_function_stack_at(i) for i in range(_len_torch_function_stack())] base_mode = BaseTorchFunctionMode() with base_mode: torch.set_default_device("cpu") x = torch.ones(2, 2) stack = get_stack() self.assertIsInstance(stack[0], DeviceContext) self.assertEqual(stack[0].device, torch.device("cpu")) stack = get_stack() self.assertIsInstance(stack[0], DeviceContext) self.assertEqual(stack[0].device, torch.device("cpu"))
import torch import numpy as np import inspect import functools import pprint import pickle import collections import unittest import contextlib from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_CROSSREF, TEST_WITH_TORCHDYNAMO from torch.overrides import ( handle_torch_function, has_torch_function, get_ignored_functions, get_overridable_functions, get_testing_overrides, resolve_name, is_tensor_method_or_property, TorchFunctionMode, _get_current_function_mode, _get_current_function_mode_stack, BaseTorchFunctionMode ) from torch.utils._mode_utils import all_same_mode from torch.utils._pytree import tree_map Tensor = torch.Tensor HANDLED_FUNCTIONS_DIAGONAL = {} HANDLED_FUNCTIONS_SUB = {} HANDLED_FUNCTIONS_SUB_DIAGONAL = {} HANDLED_FUNCTIONS_TENSOR_LIKE = {} WRAPPED_TRIGGERED_IMPLS = {} from torch.testing._internal.generated.annotated_fn_args import annotated_args from torch.testing._internal.common_utils import gradcheck, gradgradcheck from torch.distributions.utils import broadcast_all from torch._C import _is_torch_function_all_disabled from torch._C import _len_torch_function_stack from torch.utils._device import DeviceContext
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_package.py
load_tests
def load_tests(loader, standard_tests, pattern): """Load all tests from `test/pacakge/` """ if pattern is None: # Use the default pattern if none is specified by the test loader. pattern = "test*.py" package_tests = loader.discover("package", pattern=pattern) standard_tests.addTests(package_tests) return standard_tests if __name__ == "__main__": from torch.testing._internal.common_utils import run_tests run_tests()
from package.package_a.test_all_leaf_modules_tracer import ( # noqa: F401 TestAllLeafModulesTracer, ) from package.package_a.test_nn_module import TestNnModule # noqa: F401 from package.test_analyze import TestAnalyze # noqa: F401 from package.test_dependency_api import TestDependencyAPI # noqa: F401 from package.test_dependency_hooks import TestDependencyHooks # noqa: F401 from package.test_digraph import TestDiGraph # noqa: F401 from package.test_directory_reader import DirectoryReaderTest # noqa: F401 from package.test_glob_group import TestGlobGroup # noqa: F401 from package.test_importer import TestImporter # noqa: F401 from package.test_load_bc_packages import TestLoadBCPackages # noqa: F401 from package.test_mangling import TestMangling # noqa: F401 from package.test_misc import TestMisc # noqa: F401 from package.test_model import ModelTest # noqa: F401 from package.test_package_fx import TestPackageFX # noqa: F401 from package.test_package_script import TestPackageScript # noqa: F401 from package.test_repackage import TestRepackage # noqa: F401 from package.test_resources import TestResources # noqa: F401 from package.test_save_load import TestSaveLoad # noqa: F401 if __name__ == "__main__": from torch.testing._internal.common_utils import run_tests run_tests()
from torch.testing._internal.common_utils import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_prims.py
_wrapper
def _wrapper(a, b, broadcast_dimensions): return prims.broadcast_in_dim(a, b.shape, broadcast_dimensions) traced = make_traced(_wrapper) make_arg = partial(make_tensor, device=device, dtype=dtype) for executor in ('aten', 'strictly_nvfuser'): fn = partial(traced, executor=executor) # Same shape shape = (5, 5) a = make_arg(shape) b = make_arg(shape, low=0.0, high=0.0) result = fn(a, b, (0, 1)) self.assertEqual(result.shape, a.shape) self.assertTrue(result.is_contiguous) self.assertEqual(a, result) # Error input: reordering dims with self.assertRaises(Exception): result = fn(a, b, (1, 0)) # Adding outermost dimensions a = make_arg((5, 5)) b = make_arg((3, 3, 5, 5), low=0.0, high=0.0) result = fn(a, b, (2, 3)) self.assertEqual(result.shape, b.shape) self.assertEqual(a.broadcast_to(b.shape), result) # Expands a = make_arg((1, 5, 1)) b = make_arg((3, 5, 7), low=0.0, high=0.0) result = fn(a, b, (0, 1, 2)) self.assertEqual(result.shape, b.shape) self.assertEqual(a.expand_as(result), result) # Unsqueezes a = make_arg((1, 2, 3)) b = make_arg((1, 2, 1, 3), low=0.0, high=0.0) result = fn(a, b, (0, 1, 3)) self.assertEqual(result.shape, b.shape) self.assertEqual(a.unsqueeze(2), result) # FIXME: This test exposes an issue in nvfuser # Adds outermost, expands, and unsqueezes """ a = make_arg((1, 2, 3)) b = make_arg((4, 1, 7, 2, 3, 3), low=0.0, high=0.0) result = fn(a, b, (1, 3, 4)) self.assertEqual(result.shape, b.shape) a.unsqueeze_(3) a.unsqueeze_(1) a.unsqueeze_(0) self.assertEqual(a.expand_as(result), result) """
def _wrapper(a, b, broadcast_dimensions): return prims.broadcast_in_dim(a, b.shape, broadcast_dimensions) traced = make_traced(_wrapper) make_arg = partial(make_tensor, device=device, dtype=dtype) for executor in ('aten',): fn = partial(traced, executor=executor) # Same shape shape = (5, 5) a = make_arg(shape) b = make_arg(shape, low=0.0, high=0.0) result = fn(a, b, (0, 1)) self.assertEqual(result.shape, a.shape) self.assertTrue(result.is_contiguous) self.assertEqual(a, result) # Error input: reordering dims with self.assertRaises(Exception): result = fn(a, b, (1, 0)) # Adding outermost dimensions a = make_arg((5, 5)) b = make_arg((3, 3, 5, 5), low=0.0, high=0.0) result = fn(a, b, (2, 3)) self.assertEqual(result.shape, b.shape) self.assertEqual(a.broadcast_to(b.shape), result) # Expands a = make_arg((1, 5, 1)) b = make_arg((3, 5, 7), low=0.0, high=0.0) result = fn(a, b, (0, 1, 2)) self.assertEqual(result.shape, b.shape) self.assertEqual(a.expand_as(result), result) # Unsqueezes a = make_arg((1, 2, 3)) b = make_arg((1, 2, 1, 3), low=0.0, high=0.0) result = fn(a, b, (0, 1, 3)) self.assertEqual(result.shape, b.shape) self.assertEqual(a.unsqueeze(2), result)
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
from functools import partial from itertools import product import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims_common import CUDARngStateHelper from torch._prims.executor import make_traced import torch._refs as refs import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_prims.py
func1
def func1(size, value, b): return (torch.full(size, value, dtype=dtype, device=device),)
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_prims.py
func2
def func2(size, value, b): a = torch.full(size, value, dtype=dtype, device=device) b_sin = b.sin() return (torch.add(a, b_sin),)
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_prims.py
func3
def func3(size, value, b): return (torch.full(size, value, dtype=dtype, device=device), b)
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_prims.py
func4
def func4(size, value, b): b_sin = b.sin() return (torch.full(size, value, dtype=dtype, device=device), b_sin)
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_prims.py
func5
def func5(size, value, b): b_sin = b.sin() a = torch.full(size, value, dtype=dtype, device=device) a_sin = a.sin() return (a, b_sin, a_sin) for func in (func1, func3, func2, func3, func4, func5): size = (3, 3) value = 10 b = torch.randn(*size, dtype=dtype, device=device) with TorchRefsNvfuserCapabilityMode(): gm = make_fx(func)(size, value, b) out = execute(gm, size, value, b, executor="strictly_nvfuser") self.assertEqual(out, func(size, value, b))
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_prims.py
func
def func(a): return torch.ops.prims.sin.default(a) skip_ops = {"prims.sin.default", } with NvfuserPrimsMode(skip_ops=skip_ops): gm = make_fx(func)(a) includes_any_prims_sin = any( node.target == torch.ops.prims.sin.default for node in gm.graph.nodes ) self.assertTrue(includes_any_prims_sin) include_any_nvprims_sin = any( node.target == torch.ops.nvprims.sin.default for node in gm.graph.nodes ) self.assertFalse(include_any_nvprims_sin)
def func(a): return torch.ops.aten.sigmoid.default(torch.ops.aten.digamma.default(a)) with TorchRefsMode(): gm = make_fx(func)(a) # Check that all call_function nodes are prims call_function_nodes = list(filter(lambda n: n.op == "call_function", gm.graph.nodes)) all_prims_namespace = all( node.target.name().startswith("prims") for node in call_function_nodes ) self.assertTrue(all_prims_namespace)
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
from functools import partial from itertools import product import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims_common import CUDARngStateHelper from torch._prims.executor import make_traced import torch._refs as refs import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_prims.py
func
def func(a): return torch.ops.prims.sin.default(a) skip_ops = {"prims.sin.default", } with NvfuserPrimsMode(skip_ops=skip_ops): gm = make_fx(func)(a) includes_any_prims_sin = any( node.target == torch.ops.prims.sin.default for node in gm.graph.nodes ) self.assertTrue(includes_any_prims_sin) include_any_nvprims_sin = any( node.target == torch.ops.nvprims.sin.default for node in gm.graph.nodes ) self.assertFalse(include_any_nvprims_sin)
def func(a): return torch.ops.aten.sigmoid.default(torch.ops.aten.digamma.default(a)) with TorchRefsMode(): gm = make_fx(func)(a) # Check that all call_function nodes are prims call_function_nodes = list(filter(lambda n: n.op == "call_function", gm.graph.nodes)) all_prims_namespace = all( node.target.name().startswith("prims") for node in call_function_nodes ) self.assertTrue(all_prims_namespace)
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
from functools import partial from itertools import product import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims_common import CUDARngStateHelper from torch._prims.executor import make_traced import torch._refs as refs import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_prims.py
func
def func(a): return torch.ops.prims.sin.default(a) skip_ops = {"prims.sin.default", } with NvfuserPrimsMode(skip_ops=skip_ops): gm = make_fx(func)(a) includes_any_prims_sin = any( node.target == torch.ops.prims.sin.default for node in gm.graph.nodes ) self.assertTrue(includes_any_prims_sin) include_any_nvprims_sin = any( node.target == torch.ops.nvprims.sin.default for node in gm.graph.nodes ) self.assertFalse(include_any_nvprims_sin)
def func(a): return torch.ops.aten.sigmoid.default(torch.ops.aten.digamma.default(a)) with TorchRefsMode(): gm = make_fx(func)(a) # Check that all call_function nodes are prims call_function_nodes = list(filter(lambda n: n.op == "call_function", gm.graph.nodes)) all_prims_namespace = all( node.target.name().startswith("prims") for node in call_function_nodes ) self.assertTrue(all_prims_namespace)
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
from functools import partial from itertools import product import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims_common import CUDARngStateHelper from torch._prims.executor import make_traced import torch._refs as refs import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_prims.py
func
def func(a): return torch.ops.prims.sin.default(a) skip_ops = {"prims.sin.default", } with NvfuserPrimsMode(skip_ops=skip_ops): gm = make_fx(func)(a) includes_any_prims_sin = any( node.target == torch.ops.prims.sin.default for node in gm.graph.nodes ) self.assertTrue(includes_any_prims_sin) include_any_nvprims_sin = any( node.target == torch.ops.nvprims.sin.default for node in gm.graph.nodes ) self.assertFalse(include_any_nvprims_sin)
def func(a): return torch.ops.aten.sigmoid.default(torch.ops.aten.digamma.default(a)) with TorchRefsMode(): gm = make_fx(func)(a) # Check that all call_function nodes are prims call_function_nodes = list(filter(lambda n: n.op == "call_function", gm.graph.nodes)) all_prims_namespace = all( node.target.name().startswith("prims") for node in call_function_nodes ) self.assertTrue(all_prims_namespace)
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
from functools import partial from itertools import product import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims_common import CUDARngStateHelper from torch._prims.executor import make_traced import torch._refs as refs import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_prims.py
func
def func(a): return torch.ops.prims.sin.default(a) skip_ops = {"prims.sin.default", } with NvfuserPrimsMode(skip_ops=skip_ops): gm = make_fx(func)(a) includes_any_prims_sin = any( node.target == torch.ops.prims.sin.default for node in gm.graph.nodes ) self.assertTrue(includes_any_prims_sin) include_any_nvprims_sin = any( node.target == torch.ops.nvprims.sin.default for node in gm.graph.nodes ) self.assertFalse(include_any_nvprims_sin)
def func(a): return torch.ops.aten.sigmoid.default(torch.ops.aten.digamma.default(a)) with TorchRefsMode(): gm = make_fx(func)(a) # Check that all call_function nodes are prims call_function_nodes = list(filter(lambda n: n.op == "call_function", gm.graph.nodes)) all_prims_namespace = all( node.target.name().startswith("prims") for node in call_function_nodes ) self.assertTrue(all_prims_namespace)
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
from functools import partial from itertools import product import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims_common import CUDARngStateHelper from torch._prims.executor import make_traced import torch._refs as refs import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_prims.py
test_nvfuser_executor_cached_noncontiguous
def test_nvfuser_executor_cached_noncontiguous(self, device): # This test is to ensure that nvfuser computes correct results for noncontiguous tensors from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute a = torch.randn(3, 3, device=device) def func(a): return torch.sigmoid(a) with TorchRefsNvfuserCapabilityMode(): gm = make_fx(func)(a) # First run to create the cache execute(gm, a, executor="strictly_nvfuser") # a.mT is noncontiguous, but it shouldn't affect correctness expected = execute(gm, a.mT, executor="aten") for use_python_cache in [True, False]: params = {"use_python_fusion_cache": use_python_cache} actual = execute(gm, a.mT, executor="strictly_nvfuser", executor_parameters=params) self.assertEqual(expected, actual)
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" class TestPrims(TestCase): from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_prims.py
func
def func(a): return torch.ops.prims.sin.default(a) skip_ops = {"prims.sin.default", } with NvfuserPrimsMode(skip_ops=skip_ops): gm = make_fx(func)(a) includes_any_prims_sin = any( node.target == torch.ops.prims.sin.default for node in gm.graph.nodes ) self.assertTrue(includes_any_prims_sin) include_any_nvprims_sin = any( node.target == torch.ops.nvprims.sin.default for node in gm.graph.nodes ) self.assertFalse(include_any_nvprims_sin)
def func(a): return torch.ops.aten.sigmoid.default(torch.ops.aten.digamma.default(a)) with TorchRefsMode(): gm = make_fx(func)(a) # Check that all call_function nodes are prims call_function_nodes = list(filter(lambda n: n.op == "call_function", gm.graph.nodes)) all_prims_namespace = all( node.target.name().startswith("prims") for node in call_function_nodes ) self.assertTrue(all_prims_namespace)
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
from functools import partial from itertools import product import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims_common import CUDARngStateHelper from torch._prims.executor import make_traced import torch._refs as refs import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_prims.py
test_nvfuser_capability_context
def test_nvfuser_capability_context(self, device): # This test is to ensure that the torch calls are replaced with refs # based on the nvfuser+prims capability from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode # It's assumed that digamma is not supported by nvfuser # If it's ever supported, this test will need to be updated self.assertTrue(getattr(torch.ops.nvprims, "digamma", None) is None) a = torch.randn(3, 3, device=device) def func(a): return torch.digamma(a) with TorchRefsNvfuserCapabilityMode(): gm = make_fx(func)(a) # Check that the torch.digamma is not replaced with torch.ops.prims.digamma call_function_nodes = list(filter(lambda n: n.op == "call_function", gm.graph.nodes)) includes_aten_digamma = any( torch.ops.aten.digamma.default == node.target for node in call_function_nodes ) includes_prims_digamma = any( torch.ops.prims.digamma.default == node.target for node in call_function_nodes ) self.assertTrue(includes_aten_digamma) self.assertFalse(includes_prims_digamma) # Check mixed case, sigmoid is replaced with refs, but digamma is not def func(a): return torch.sigmoid(torch.digamma(a)) with TorchRefsNvfuserCapabilityMode(): gm = make_fx(func)(a) call_function_nodes = list(filter(lambda n: n.op == "call_function", gm.graph.nodes)) includes_aten_sigmoid = any( torch.ops.aten.sigmoid.default == node.target for node in call_function_nodes ) includes_prims_digamma = any( torch.ops.prims.digamma.default == node.target for node in call_function_nodes ) includes_nvprims_exp = any( torch.ops.nvprims.exp.default == node.target for node in call_function_nodes ) self.assertFalse(includes_aten_sigmoid) self.assertFalse(includes_prims_digamma) self.assertTrue(includes_nvprims_exp)
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" class TestPrims(TestCase): from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_prims.py
func
def func(a): return torch.ops.prims.sin.default(a) skip_ops = {"prims.sin.default", } with NvfuserPrimsMode(skip_ops=skip_ops): gm = make_fx(func)(a) includes_any_prims_sin = any( node.target == torch.ops.prims.sin.default for node in gm.graph.nodes ) self.assertTrue(includes_any_prims_sin) include_any_nvprims_sin = any( node.target == torch.ops.nvprims.sin.default for node in gm.graph.nodes ) self.assertFalse(include_any_nvprims_sin)
def func(a): return torch.ops.aten.sigmoid.default(torch.ops.aten.digamma.default(a)) with TorchRefsMode(): gm = make_fx(func)(a) # Check that all call_function nodes are prims call_function_nodes = list(filter(lambda n: n.op == "call_function", gm.graph.nodes)) all_prims_namespace = all( node.target.name().startswith("prims") for node in call_function_nodes ) self.assertTrue(all_prims_namespace)
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
from functools import partial from itertools import product import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims_common import CUDARngStateHelper from torch._prims.executor import make_traced import torch._refs as refs import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_prims.py
func
def func(a): return torch.ops.prims.sin.default(a) skip_ops = {"prims.sin.default", } with NvfuserPrimsMode(skip_ops=skip_ops): gm = make_fx(func)(a) includes_any_prims_sin = any( node.target == torch.ops.prims.sin.default for node in gm.graph.nodes ) self.assertTrue(includes_any_prims_sin) include_any_nvprims_sin = any( node.target == torch.ops.nvprims.sin.default for node in gm.graph.nodes ) self.assertFalse(include_any_nvprims_sin)
def func(a): return torch.ops.aten.sigmoid.default(torch.ops.aten.digamma.default(a)) with TorchRefsMode(): gm = make_fx(func)(a) # Check that all call_function nodes are prims call_function_nodes = list(filter(lambda n: n.op == "call_function", gm.graph.nodes)) all_prims_namespace = all( node.target.name().startswith("prims") for node in call_function_nodes ) self.assertTrue(all_prims_namespace)
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
from functools import partial from itertools import product import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims_common import CUDARngStateHelper from torch._prims.executor import make_traced import torch._refs as refs import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_prims.py
func1
def func1(size, value, b): return (torch.full(size, value, dtype=dtype, device=device),)
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_prims.py
func2
def func2(size, value, b): a = torch.full(size, value, dtype=dtype, device=device) b_sin = b.sin() return (torch.add(a, b_sin),)
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_prims.py
func
def func(a): return torch.ops.prims.sin.default(a) skip_ops = {"prims.sin.default", } with NvfuserPrimsMode(skip_ops=skip_ops): gm = make_fx(func)(a) includes_any_prims_sin = any( node.target == torch.ops.prims.sin.default for node in gm.graph.nodes ) self.assertTrue(includes_any_prims_sin) include_any_nvprims_sin = any( node.target == torch.ops.nvprims.sin.default for node in gm.graph.nodes ) self.assertFalse(include_any_nvprims_sin)
def func(a): return torch.ops.aten.sigmoid.default(torch.ops.aten.digamma.default(a)) with TorchRefsMode(): gm = make_fx(func)(a) # Check that all call_function nodes are prims call_function_nodes = list(filter(lambda n: n.op == "call_function", gm.graph.nodes)) all_prims_namespace = all( node.target.name().startswith("prims") for node in call_function_nodes ) self.assertTrue(all_prims_namespace)
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
from functools import partial from itertools import product import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims_common import CUDARngStateHelper from torch._prims.executor import make_traced import torch._refs as refs import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_prims.py
_wrapper
def _wrapper(a, b, broadcast_dimensions): return prims.broadcast_in_dim(a, b.shape, broadcast_dimensions) traced = make_traced(_wrapper) make_arg = partial(make_tensor, device=device, dtype=dtype) for executor in ('aten', 'strictly_nvfuser'): fn = partial(traced, executor=executor) # Same shape shape = (5, 5) a = make_arg(shape) b = make_arg(shape, low=0.0, high=0.0) result = fn(a, b, (0, 1)) self.assertEqual(result.shape, a.shape) self.assertTrue(result.is_contiguous) self.assertEqual(a, result) # Error input: reordering dims with self.assertRaises(Exception): result = fn(a, b, (1, 0)) # Adding outermost dimensions a = make_arg((5, 5)) b = make_arg((3, 3, 5, 5), low=0.0, high=0.0) result = fn(a, b, (2, 3)) self.assertEqual(result.shape, b.shape) self.assertEqual(a.broadcast_to(b.shape), result) # Expands a = make_arg((1, 5, 1)) b = make_arg((3, 5, 7), low=0.0, high=0.0) result = fn(a, b, (0, 1, 2)) self.assertEqual(result.shape, b.shape) self.assertEqual(a.expand_as(result), result) # Unsqueezes a = make_arg((1, 2, 3)) b = make_arg((1, 2, 1, 3), low=0.0, high=0.0) result = fn(a, b, (0, 1, 3)) self.assertEqual(result.shape, b.shape) self.assertEqual(a.unsqueeze(2), result) # FIXME: This test exposes an issue in nvfuser # Adds outermost, expands, and unsqueezes """ a = make_arg((1, 2, 3)) b = make_arg((4, 1, 7, 2, 3, 3), low=0.0, high=0.0) result = fn(a, b, (1, 3, 4)) self.assertEqual(result.shape, b.shape) a.unsqueeze_(3) a.unsqueeze_(1) a.unsqueeze_(0) self.assertEqual(a.expand_as(result), result) """
def _wrapper(a, b, broadcast_dimensions): return prims.broadcast_in_dim(a, b.shape, broadcast_dimensions) traced = make_traced(_wrapper) make_arg = partial(make_tensor, device=device, dtype=dtype) for executor in ('aten',): fn = partial(traced, executor=executor) # Same shape shape = (5, 5) a = make_arg(shape) b = make_arg(shape, low=0.0, high=0.0) result = fn(a, b, (0, 1)) self.assertEqual(result.shape, a.shape) self.assertTrue(result.is_contiguous) self.assertEqual(a, result) # Error input: reordering dims with self.assertRaises(Exception): result = fn(a, b, (1, 0)) # Adding outermost dimensions a = make_arg((5, 5)) b = make_arg((3, 3, 5, 5), low=0.0, high=0.0) result = fn(a, b, (2, 3)) self.assertEqual(result.shape, b.shape) self.assertEqual(a.broadcast_to(b.shape), result) # Expands a = make_arg((1, 5, 1)) b = make_arg((3, 5, 7), low=0.0, high=0.0) result = fn(a, b, (0, 1, 2)) self.assertEqual(result.shape, b.shape) self.assertEqual(a.expand_as(result), result) # Unsqueezes a = make_arg((1, 2, 3)) b = make_arg((1, 2, 1, 3), low=0.0, high=0.0) result = fn(a, b, (0, 1, 3)) self.assertEqual(result.shape, b.shape) self.assertEqual(a.unsqueeze(2), result)
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
from functools import partial from itertools import product import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims_common import CUDARngStateHelper from torch._prims.executor import make_traced import torch._refs as refs import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_prims.py
_wrapper
def _wrapper(a, b, broadcast_dimensions): return prims.broadcast_in_dim(a, b.shape, broadcast_dimensions) traced = make_traced(_wrapper) make_arg = partial(make_tensor, device=device, dtype=dtype) for executor in ('aten', 'strictly_nvfuser'): fn = partial(traced, executor=executor) # Same shape shape = (5, 5) a = make_arg(shape) b = make_arg(shape, low=0.0, high=0.0) result = fn(a, b, (0, 1)) self.assertEqual(result.shape, a.shape) self.assertTrue(result.is_contiguous) self.assertEqual(a, result) # Error input: reordering dims with self.assertRaises(Exception): result = fn(a, b, (1, 0)) # Adding outermost dimensions a = make_arg((5, 5)) b = make_arg((3, 3, 5, 5), low=0.0, high=0.0) result = fn(a, b, (2, 3)) self.assertEqual(result.shape, b.shape) self.assertEqual(a.broadcast_to(b.shape), result) # Expands a = make_arg((1, 5, 1)) b = make_arg((3, 5, 7), low=0.0, high=0.0) result = fn(a, b, (0, 1, 2)) self.assertEqual(result.shape, b.shape) self.assertEqual(a.expand_as(result), result) # Unsqueezes a = make_arg((1, 2, 3)) b = make_arg((1, 2, 1, 3), low=0.0, high=0.0) result = fn(a, b, (0, 1, 3)) self.assertEqual(result.shape, b.shape) self.assertEqual(a.unsqueeze(2), result) # FIXME: This test exposes an issue in nvfuser # Adds outermost, expands, and unsqueezes """ a = make_arg((1, 2, 3)) b = make_arg((4, 1, 7, 2, 3, 3), low=0.0, high=0.0) result = fn(a, b, (1, 3, 4)) self.assertEqual(result.shape, b.shape) a.unsqueeze_(3) a.unsqueeze_(1) a.unsqueeze_(0) self.assertEqual(a.expand_as(result), result) """
def _wrapper(a, b, broadcast_dimensions): return prims.broadcast_in_dim(a, b.shape, broadcast_dimensions) traced = make_traced(_wrapper) make_arg = partial(make_tensor, device=device, dtype=dtype) for executor in ('aten',): fn = partial(traced, executor=executor) # Same shape shape = (5, 5) a = make_arg(shape) b = make_arg(shape, low=0.0, high=0.0) result = fn(a, b, (0, 1)) self.assertEqual(result.shape, a.shape) self.assertTrue(result.is_contiguous) self.assertEqual(a, result) # Error input: reordering dims with self.assertRaises(Exception): result = fn(a, b, (1, 0)) # Adding outermost dimensions a = make_arg((5, 5)) b = make_arg((3, 3, 5, 5), low=0.0, high=0.0) result = fn(a, b, (2, 3)) self.assertEqual(result.shape, b.shape) self.assertEqual(a.broadcast_to(b.shape), result) # Expands a = make_arg((1, 5, 1)) b = make_arg((3, 5, 7), low=0.0, high=0.0) result = fn(a, b, (0, 1, 2)) self.assertEqual(result.shape, b.shape) self.assertEqual(a.expand_as(result), result) # Unsqueezes a = make_arg((1, 2, 3)) b = make_arg((1, 2, 1, 3), low=0.0, high=0.0) result = fn(a, b, (0, 1, 3)) self.assertEqual(result.shape, b.shape) self.assertEqual(a.unsqueeze(2), result)
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
from functools import partial from itertools import product import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims_common import CUDARngStateHelper from torch._prims.executor import make_traced import torch._refs as refs import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_prims.py
func1
def func1(size, value, b): return (torch.full(size, value, dtype=dtype, device=device),)
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_prims.py
func2
def func2(size, value, b): a = torch.full(size, value, dtype=dtype, device=device) b_sin = b.sin() return (torch.add(a, b_sin),)
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_prims.py
func3
def func3(size, value, b): return (torch.full(size, value, dtype=dtype, device=device), b)
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_prims.py
func4
def func4(size, value, b): b_sin = b.sin() return (torch.full(size, value, dtype=dtype, device=device), b_sin)
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_prims.py
func5
def func5(size, value, b): b_sin = b.sin() a = torch.full(size, value, dtype=dtype, device=device) a_sin = a.sin() return (a, b_sin, a_sin) for func in (func1, func3, func2, func3, func4, func5): size = (3, 3) value = 10 b = torch.randn(*size, dtype=dtype, device=device) with TorchRefsNvfuserCapabilityMode(): gm = make_fx(func)(size, value, b) out = execute(gm, size, value, b, executor="strictly_nvfuser") self.assertEqual(out, func(size, value, b))
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_prims.py
func6
def func6(a): return torch.ops.aten._unsafe_view.default(a, tuple(reversed(a.shape)))
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_prims.py
func7
def func7(a): return torch.ops.aten.view_copy.default(a, tuple(reversed(a.shape))) for func in (func1, func2, func3, func4, func5, func6, func7): with TorchRefsNvfuserCapabilityMode(): gm = make_fx(func)(a) call_function_nodes = list(filter(lambda n: n.op == "call_function", gm.graph.nodes)) includes_nvprims_view = any( torch.ops.nvprims.view.default == node.target for node in call_function_nodes ) self.assertTrue(includes_nvprims_view) # Try executing the graph out = execute(gm, a, executor="strictly_nvfuser") self.assertEqual(out, func(a))
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_prims.py
func
def func(a): return torch.ops.prims.sin.default(a) skip_ops = {"prims.sin.default", } with NvfuserPrimsMode(skip_ops=skip_ops): gm = make_fx(func)(a) includes_any_prims_sin = any( node.target == torch.ops.prims.sin.default for node in gm.graph.nodes ) self.assertTrue(includes_any_prims_sin) include_any_nvprims_sin = any( node.target == torch.ops.nvprims.sin.default for node in gm.graph.nodes ) self.assertFalse(include_any_nvprims_sin)
def func(a): return torch.ops.aten.sigmoid.default(torch.ops.aten.digamma.default(a)) with TorchRefsMode(): gm = make_fx(func)(a) # Check that all call_function nodes are prims call_function_nodes = list(filter(lambda n: n.op == "call_function", gm.graph.nodes)) all_prims_namespace = all( node.target.name().startswith("prims") for node in call_function_nodes ) self.assertTrue(all_prims_namespace)
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
from functools import partial from itertools import product import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims_common import CUDARngStateHelper from torch._prims.executor import make_traced import torch._refs as refs import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_prims.py
func
def func(a): return torch.ops.prims.sin.default(a) skip_ops = {"prims.sin.default", } with NvfuserPrimsMode(skip_ops=skip_ops): gm = make_fx(func)(a) includes_any_prims_sin = any( node.target == torch.ops.prims.sin.default for node in gm.graph.nodes ) self.assertTrue(includes_any_prims_sin) include_any_nvprims_sin = any( node.target == torch.ops.nvprims.sin.default for node in gm.graph.nodes ) self.assertFalse(include_any_nvprims_sin)
def func(a): return torch.ops.aten.sigmoid.default(torch.ops.aten.digamma.default(a)) with TorchRefsMode(): gm = make_fx(func)(a) # Check that all call_function nodes are prims call_function_nodes = list(filter(lambda n: n.op == "call_function", gm.graph.nodes)) all_prims_namespace = all( node.target.name().startswith("prims") for node in call_function_nodes ) self.assertTrue(all_prims_namespace)
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
from functools import partial from itertools import product import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims_common import CUDARngStateHelper from torch._prims.executor import make_traced import torch._refs as refs import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_prims.py
func
def func(a): return torch.ops.prims.sin.default(a) skip_ops = {"prims.sin.default", } with NvfuserPrimsMode(skip_ops=skip_ops): gm = make_fx(func)(a) includes_any_prims_sin = any( node.target == torch.ops.prims.sin.default for node in gm.graph.nodes ) self.assertTrue(includes_any_prims_sin) include_any_nvprims_sin = any( node.target == torch.ops.nvprims.sin.default for node in gm.graph.nodes ) self.assertFalse(include_any_nvprims_sin)
def func(a): return torch.ops.aten.sigmoid.default(torch.ops.aten.digamma.default(a)) with TorchRefsMode(): gm = make_fx(func)(a) # Check that all call_function nodes are prims call_function_nodes = list(filter(lambda n: n.op == "call_function", gm.graph.nodes)) all_prims_namespace = all( node.target.name().startswith("prims") for node in call_function_nodes ) self.assertTrue(all_prims_namespace)
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
from functools import partial from itertools import product import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims_common import CUDARngStateHelper from torch._prims.executor import make_traced import torch._refs as refs import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_prims.py
func
def func(a): return torch.ops.prims.sin.default(a) skip_ops = {"prims.sin.default", } with NvfuserPrimsMode(skip_ops=skip_ops): gm = make_fx(func)(a) includes_any_prims_sin = any( node.target == torch.ops.prims.sin.default for node in gm.graph.nodes ) self.assertTrue(includes_any_prims_sin) include_any_nvprims_sin = any( node.target == torch.ops.nvprims.sin.default for node in gm.graph.nodes ) self.assertFalse(include_any_nvprims_sin)
def func(a): return torch.ops.aten.sigmoid.default(torch.ops.aten.digamma.default(a)) with TorchRefsMode(): gm = make_fx(func)(a) # Check that all call_function nodes are prims call_function_nodes = list(filter(lambda n: n.op == "call_function", gm.graph.nodes)) all_prims_namespace = all( node.target.name().startswith("prims") for node in call_function_nodes ) self.assertTrue(all_prims_namespace)
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
from functools import partial from itertools import product import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims_common import CUDARngStateHelper from torch._prims.executor import make_traced import torch._refs as refs import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_prims.py
test_nvfuser_executor_partitioned_no_partitions_error
def test_nvfuser_executor_partitioned_no_partitions_error(self, device): # This test is to ensure that nvfuser partitioned executor works correctly # It's assumed that digamma is not supported by nvfuser # If it's ever supported, this test will need to be updated self.assertTrue(getattr(torch.ops.nvprims, "digamma", None) is None) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute a = torch.randn(3, 4, device=device) def func(a): return torch.digamma(a) # not supported by nvfuser with TorchRefsNvfuserCapabilityMode(): gm = make_fx(func)(a) with catch_warnings(record=True) as w: # Trigger warning execute(gm, a, executor="nvfuser") # Check warning occurs self.assertEqual(len(w), 1) self.assertTrue("is not supported by nvFuser" in str(w[-1].message))
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" class TestPrims(TestCase): from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_prims.py
func
def func(a): return torch.ops.prims.sin.default(a) skip_ops = {"prims.sin.default", } with NvfuserPrimsMode(skip_ops=skip_ops): gm = make_fx(func)(a) includes_any_prims_sin = any( node.target == torch.ops.prims.sin.default for node in gm.graph.nodes ) self.assertTrue(includes_any_prims_sin) include_any_nvprims_sin = any( node.target == torch.ops.nvprims.sin.default for node in gm.graph.nodes ) self.assertFalse(include_any_nvprims_sin)
def func(a): return torch.ops.aten.sigmoid.default(torch.ops.aten.digamma.default(a)) with TorchRefsMode(): gm = make_fx(func)(a) # Check that all call_function nodes are prims call_function_nodes = list(filter(lambda n: n.op == "call_function", gm.graph.nodes)) all_prims_namespace = all( node.target.name().startswith("prims") for node in call_function_nodes ) self.assertTrue(all_prims_namespace)
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
from functools import partial from itertools import product import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims_common import CUDARngStateHelper from torch._prims.executor import make_traced import torch._refs as refs import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_prims.py
test_clone_complex
def test_clone_complex(self): with torch._dispatch.python.enable_python_dispatcher(): x = torch.randn(4, dtype=torch.complex64, device='meta').conj() out = x + 1
from functools import partial from itertools import product import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims_common import CUDARngStateHelper from torch._prims.executor import make_traced import torch._refs as refs import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode class TestPrimsBasic(TestCase): from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_prims.py
test_unbind
instantiate_device_type_tests(TestRefs, globals())
def test_unbind(self): # If unbind returns empty tuple, it breaks some assumptions in some backward tests in test_ops.py. # So can't put this test into common_methods_invocations.py. a = torch.rand([3, 0, 4]) actual = refs.unbind(a, 1) expect = torch.unbind(a, 1) self.assertEqual(actual, expect)
from functools import partial from itertools import product import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims_common import CUDARngStateHelper from torch._prims.executor import make_traced import torch._refs as refs import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode class TestRefs(TestCase): from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_prims.py
test_logspace_with_complex_input
def test_logspace_with_complex_input(self): actual = refs.logspace(2, 10 + 5j, steps=5) expect = torch.logspace(2, 10 + 5j, steps=5) self.assertEqual(actual, expect)
from functools import partial from itertools import product import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims_common import CUDARngStateHelper from torch._prims.executor import make_traced import torch._refs as refs import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode class TestRefs(TestCase): from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_prims.py
test_infinite_loop_from_py_dispatcher
from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx op = torch.ops.aten.leaky_relu_backward.default op_decomp = torch._decomp.decomposition_table.get(op)
def test_infinite_loop_from_py_dispatcher(self): # enables prim decomps with torch._dispatch.python.enable_python_dispatcher(): x = torch.ones(4) y = x.to(device="meta")
from functools import partial from itertools import product import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims_common import CUDARngStateHelper from torch._prims.executor import make_traced import torch._refs as refs import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode class TestRefs(TestCase): from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_prims.py
fn0
def fn0(*arg): return _is_func_unsupported_nvfuser(TorchRefsNvfuserCapabilityMode(), op, op_decomp, arg, {})
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_prims.py
fn1
def fn1(x): x = x * 2 x = x @ x x = x * 2 return x self.assertFalse(fn0(x, y, 0.3, False)) with TorchRefsNvfuserCapabilityMode(): # Autocast context has C++ level ATen calls that are hidden from # TorchRefsNvfuserCapabilityMode that works only on Python level. # The first call to make_fx records autocast C++ calls directly and # doesn't have the chance to translate to nvprims. After the first # call, "gm" contains explicit calls to torch.ops.aten and nothing # is hidden, so the second call to make_fx actually translates # recorded autocast dtype conversions to nvprims. with torch.autocast("cuda"): gm = make_fx(fn1)(x) gm = make_fx(gm)(x) call_function_nodes = list(filter(lambda n: n.op == "call_function", gm.graph.nodes)) includes_aten_to_copy = any( torch.ops.aten._to_copy.default == node.target for node in call_function_nodes ) self.assertFalse(includes_aten_to_copy)
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_prims.py
func
def func(a): return torch.ops.prims.sin.default(a) skip_ops = {"prims.sin.default", } with NvfuserPrimsMode(skip_ops=skip_ops): gm = make_fx(func)(a) includes_any_prims_sin = any( node.target == torch.ops.prims.sin.default for node in gm.graph.nodes ) self.assertTrue(includes_any_prims_sin) include_any_nvprims_sin = any( node.target == torch.ops.nvprims.sin.default for node in gm.graph.nodes ) self.assertFalse(include_any_nvprims_sin)
def func(a): return torch.ops.aten.sigmoid.default(torch.ops.aten.digamma.default(a)) with TorchRefsMode(): gm = make_fx(func)(a) # Check that all call_function nodes are prims call_function_nodes = list(filter(lambda n: n.op == "call_function", gm.graph.nodes)) all_prims_namespace = all( node.target.name().startswith("prims") for node in call_function_nodes ) self.assertTrue(all_prims_namespace)
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
from functools import partial from itertools import product import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims_common import CUDARngStateHelper from torch._prims.executor import make_traced import torch._refs as refs import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_proxy_tensor.py
show_guards
def show_guards(gm): names = [strip_end(n, "_1") for n in fx_placeholder_targets(gm)] return "\n".join( gm.shape_env.produce_guards(fx_placeholder_vals(gm), names, _simplified=True) )
def show_guards(gm): names = [strip_end(n, "_1") for n in fx_placeholder_targets(gm)] return "\n".join( gm.shape_env.produce_guards(fx_placeholder_vals(gm), names, _simplified=True, input_contexts=None) )
from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS, xfail_inherited_tests import torch import unittest import warnings import operator from collections.abc import Iterable from torch.testing._internal.common_device_type import instantiate_device_type_tests from torch.testing._internal.common_methods_invocations import DecorateInfo from torch.testing._internal.common_methods_invocations import op_db, wrapper_set_seed from torch._subclasses.fake_tensor import DynamicOutputShapeException, DataDependentOutputException from torch._decomp import decomposition_table from torch.fx.experimental.symbolic_shapes import ( sym_float, eval_guards, bind_symbols, fx_placeholder_vals, fx_placeholder_targets ) from torch.testing._internal.common_device_type import ops from torch._C import _disabled_torch_function_impl from torch.fx.experimental.proxy_tensor import make_fx, DecompositionInterpreter, get_isolated_graphmodule from torch.utils._pytree import tree_map from torch import nn import re import functools import itertools aten = torch.ops.aten import sympy # noqa: F401 skipIfNoSympy = unittest.skipIf(not HAS_SYMPY, "no sympy") HAS_CUDA = torch.cuda.is_available() import torchvision from torch.testing._internal.logging_tensor import LoggingTensorMode from torch.testing._internal.logging_tensor import LoggingTensor import pickle import torch.library from torch.library import Library from torch._functorch.compilers import DebugInterpreter from torch._dynamo.source import LocalSource
from torch.testing._internal.common_utils import TestCase, run_tests import torch import torch._dynamo import unittest import warnings import operator from collections.abc import Iterable from torch.nn.utils import stateless from torch.testing._internal.common_device_type import instantiate_device_type_tests from torch.testing._internal.common_methods_invocations import op_db, skip, xfail, skipOps from torch._subclasses.fake_tensor import DynamicOutputShapeException, DataDependentOutputException, FakeTensorMode from torch._subclasses.functional_tensor import FunctionalTensor, FunctionalTensorMode from torch._decomp import decomposition_table from torch.fx.experimental.symbolic_shapes import ( eval_guards, bind_symbols, fx_placeholder_vals, fx_placeholder_targets, guard_int, GuardOnDataDependentSymNode ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.hop_db import hop_db from torch.testing._internal.common_device_type import ops import torch.testing._internal.optests as optests from torch._C import _disabled_torch_function_impl from torch.fx.experimental.proxy_tensor import make_fx, DecompositionInterpreter, get_isolated_graphmodule from torch.utils._pytree import tree_map from torch.fx.passes.runtime_assert import insert_deferred_runtime_asserts from torch import nn import torch._functorch.config import re import functools import itertools aten = torch.ops.aten HAS_CUDA = torch.cuda.is_available() import torchvision from torch._dispatch.python import enable_python_dispatcher from torch.testing._internal.logging_tensor import LoggingTensorMode from torch.testing._internal.logging_tensor import LoggingTensor import pickle from torch._dispatch.python import enable_python_dispatcher from torch._dispatch.python import enable_python_dispatcher import torch.library from torch.library import Library from torch._functorch.compilers import DebugInterpreter from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch._dynamo.source import LocalSource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_proxy_tensor.py
process_failures
def process_failures(): """ Takes file containing failures like FAILED test/test_proxy_tensor.py::TestProxyTensorOpInfoCPU::test_make_fx_symbolic_exhaustive___getitem___cpu_float32 - RuntimeError: aten.size.default - couldn't find symbolic meta function/decomposition # noqa: B950 and processes them into a list of opinfo xfails """ f = open('pytest_failures') failures = f.readlines() failures = [i.strip() for i in failures] def process_failure_string(s, matcher): out = re.search(matcher, s) return out.groups() SYMBOLIC_TRACE_MATCH = r'exhaustive_(.*)_cpu.*: (.*)' failures = [process_failure_string(s, SYMBOLIC_TRACE_MATCH) for s in failures] def create_normalized_name(op): if op.variant_test_name == '': s = op.name else: s = f"{op.name}.{op.variant_test_name}" return s.replace('.', '_') remap_opinfo = {create_normalized_name(op): (op.name, op.variant_test_name) for op in op_db} print("symbolic_tensor_failures = {") for failure, reason in failures: print(f" xfail{remap_opinfo[failure]}, # {reason}") print("}") # Copied from functorch
def process_failures(): """ Takes file containing failures like FAILED test/test_proxy_tensor.py::TestProxyTensorOpInfoCPU::test_make_fx_symbolic_exhaustive___getitem___cpu_float32 - RuntimeError: aten.size.default - couldn't find symbolic meta function/decomposition # noqa: B950 and processes them into a list of opinfo xfails """ f = open('pytest_failures') failures = f.readlines() failures = [i.strip() for i in failures] def process_failure_string(s, matcher): out = re.search(matcher, s) return out.groups() SYMBOLIC_TRACE_MATCH = r'exhaustive_(.*)_cpu.*: (.*)' failures = [process_failure_string(s, SYMBOLIC_TRACE_MATCH) for s in failures] def create_normalized_name(op): if op.variant_test_name == '': s = op.name else: s = f"{op.name}.{op.variant_test_name}" return s.replace('.', '_') remap_opinfo = {create_normalized_name(op): (op.name, op.variant_test_name) for op in op_db} print("symbolic_tensor_failures = {") for failure, reason in failures: print(f" xfail{remap_opinfo[failure]}, # {reason}") print("}") USE_TORCHVISION = False try: import torchvision USE_TORCHVISION = True except ImportError: warnings.warn("Couldn't import torchvision. Some of our tests use it, try " "to install it with commands from pytorch.org, post-fixed with " "`--no-deps` to avoid overwriting the pytorch installation", UserWarning)
from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS, xfail_inherited_tests import torch import unittest import warnings import operator from collections.abc import Iterable from torch.testing._internal.common_device_type import instantiate_device_type_tests from torch.testing._internal.common_methods_invocations import DecorateInfo from torch.testing._internal.common_methods_invocations import op_db, wrapper_set_seed from torch._subclasses.fake_tensor import DynamicOutputShapeException, DataDependentOutputException from torch._decomp import decomposition_table from torch.fx.experimental.symbolic_shapes import ( sym_float, eval_guards, bind_symbols, fx_placeholder_vals, fx_placeholder_targets ) from torch.testing._internal.common_device_type import ops from torch._C import _disabled_torch_function_impl from torch.fx.experimental.proxy_tensor import make_fx, DecompositionInterpreter, get_isolated_graphmodule from torch.utils._pytree import tree_map from torch import nn import re import functools import itertools aten = torch.ops.aten import sympy # noqa: F401 skipIfNoSympy = unittest.skipIf(not HAS_SYMPY, "no sympy") HAS_CUDA = torch.cuda.is_available() import torchvision from torch.testing._internal.logging_tensor import LoggingTensorMode from torch.testing._internal.logging_tensor import LoggingTensor import pickle import torch.library from torch.library import Library from torch._functorch.compilers import DebugInterpreter from torch._dynamo.source import LocalSource
from torch.testing._internal.common_utils import TestCase, run_tests import torch import torch._dynamo import unittest import warnings import operator from collections.abc import Iterable from torch.nn.utils import stateless from torch.testing._internal.common_device_type import instantiate_device_type_tests from torch.testing._internal.common_methods_invocations import op_db, skip, xfail, skipOps from torch._subclasses.fake_tensor import DynamicOutputShapeException, DataDependentOutputException, FakeTensorMode from torch._subclasses.functional_tensor import FunctionalTensor, FunctionalTensorMode from torch._decomp import decomposition_table from torch.fx.experimental.symbolic_shapes import ( eval_guards, bind_symbols, fx_placeholder_vals, fx_placeholder_targets, guard_int, GuardOnDataDependentSymNode ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.hop_db import hop_db from torch.testing._internal.common_device_type import ops import torch.testing._internal.optests as optests from torch._C import _disabled_torch_function_impl from torch.fx.experimental.proxy_tensor import make_fx, DecompositionInterpreter, get_isolated_graphmodule from torch.utils._pytree import tree_map from torch.fx.passes.runtime_assert import insert_deferred_runtime_asserts from torch import nn import torch._functorch.config import re import functools import itertools aten = torch.ops.aten HAS_CUDA = torch.cuda.is_available() import torchvision from torch._dispatch.python import enable_python_dispatcher from torch.testing._internal.logging_tensor import LoggingTensorMode from torch.testing._internal.logging_tensor import LoggingTensor import pickle from torch._dispatch.python import enable_python_dispatcher from torch._dispatch.python import enable_python_dispatcher import torch.library from torch.library import Library from torch._functorch.compilers import DebugInterpreter from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch._dynamo.source import LocalSource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_prims.py
func
def func(a): return torch.ops.prims.sin.default(a) skip_ops = {"prims.sin.default", } with NvfuserPrimsMode(skip_ops=skip_ops): gm = make_fx(func)(a) includes_any_prims_sin = any( node.target == torch.ops.prims.sin.default for node in gm.graph.nodes ) self.assertTrue(includes_any_prims_sin) include_any_nvprims_sin = any( node.target == torch.ops.nvprims.sin.default for node in gm.graph.nodes ) self.assertFalse(include_any_nvprims_sin)
def func(a): return torch.ops.aten.sigmoid.default(torch.ops.aten.digamma.default(a)) with TorchRefsMode(): gm = make_fx(func)(a) # Check that all call_function nodes are prims call_function_nodes = list(filter(lambda n: n.op == "call_function", gm.graph.nodes)) all_prims_namespace = all( node.target.name().startswith("prims") for node in call_function_nodes ) self.assertTrue(all_prims_namespace)
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
from functools import partial from itertools import product import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims_common import CUDARngStateHelper from torch._prims.executor import make_traced import torch._refs as refs import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_prims.py
_wrapper
def _wrapper(a, b, broadcast_dimensions): return prims.broadcast_in_dim(a, b.shape, broadcast_dimensions) traced = make_traced(_wrapper) make_arg = partial(make_tensor, device=device, dtype=dtype) for executor in ('aten', 'strictly_nvfuser'): fn = partial(traced, executor=executor) # Same shape shape = (5, 5) a = make_arg(shape) b = make_arg(shape, low=0.0, high=0.0) result = fn(a, b, (0, 1)) self.assertEqual(result.shape, a.shape) self.assertTrue(result.is_contiguous) self.assertEqual(a, result) # Error input: reordering dims with self.assertRaises(Exception): result = fn(a, b, (1, 0)) # Adding outermost dimensions a = make_arg((5, 5)) b = make_arg((3, 3, 5, 5), low=0.0, high=0.0) result = fn(a, b, (2, 3)) self.assertEqual(result.shape, b.shape) self.assertEqual(a.broadcast_to(b.shape), result) # Expands a = make_arg((1, 5, 1)) b = make_arg((3, 5, 7), low=0.0, high=0.0) result = fn(a, b, (0, 1, 2)) self.assertEqual(result.shape, b.shape) self.assertEqual(a.expand_as(result), result) # Unsqueezes a = make_arg((1, 2, 3)) b = make_arg((1, 2, 1, 3), low=0.0, high=0.0) result = fn(a, b, (0, 1, 3)) self.assertEqual(result.shape, b.shape) self.assertEqual(a.unsqueeze(2), result) # FIXME: This test exposes an issue in nvfuser # Adds outermost, expands, and unsqueezes """ a = make_arg((1, 2, 3)) b = make_arg((4, 1, 7, 2, 3, 3), low=0.0, high=0.0) result = fn(a, b, (1, 3, 4)) self.assertEqual(result.shape, b.shape) a.unsqueeze_(3) a.unsqueeze_(1) a.unsqueeze_(0) self.assertEqual(a.expand_as(result), result) """
def _wrapper(a, b, broadcast_dimensions): return prims.broadcast_in_dim(a, b.shape, broadcast_dimensions) traced = make_traced(_wrapper) make_arg = partial(make_tensor, device=device, dtype=dtype) for executor in ('aten',): fn = partial(traced, executor=executor) # Same shape shape = (5, 5) a = make_arg(shape) b = make_arg(shape, low=0.0, high=0.0) result = fn(a, b, (0, 1)) self.assertEqual(result.shape, a.shape) self.assertTrue(result.is_contiguous) self.assertEqual(a, result) # Error input: reordering dims with self.assertRaises(Exception): result = fn(a, b, (1, 0)) # Adding outermost dimensions a = make_arg((5, 5)) b = make_arg((3, 3, 5, 5), low=0.0, high=0.0) result = fn(a, b, (2, 3)) self.assertEqual(result.shape, b.shape) self.assertEqual(a.broadcast_to(b.shape), result) # Expands a = make_arg((1, 5, 1)) b = make_arg((3, 5, 7), low=0.0, high=0.0) result = fn(a, b, (0, 1, 2)) self.assertEqual(result.shape, b.shape) self.assertEqual(a.expand_as(result), result) # Unsqueezes a = make_arg((1, 2, 3)) b = make_arg((1, 2, 1, 3), low=0.0, high=0.0) result = fn(a, b, (0, 1, 3)) self.assertEqual(result.shape, b.shape) self.assertEqual(a.unsqueeze(2), result)
from functools import partial from itertools import product import warnings from warnings import catch_warnings import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype, skipCUDAMemoryLeakCheckIf) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims.executor import make_traced import torch._refs as refs from torch.fx.experimental.proxy_tensor import make_fx import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from nvfuser._C import FusionDefinition as fd from torch._prims.context import NvfuserPrimsMode from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch._prims.nvfuser_executor import make_nvfuser_fusion from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_methods_invocations import ( sample_inputs_native_batch_norm, ) from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm from torch.fx.experimental.proxy_tensor import make_fx from functorch import functionalize from torch._prims.nvfuser_executor import _remove_empty_like_fill from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.nvfuser_executor import maybe_partition_graph from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch._prims.executor import execute from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser from torch.fx.experimental.proxy_tensor import make_fx from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsNvfuserCapabilityMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
from functools import partial from itertools import product import unittest import torch from torch.testing import make_tensor from torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY, set_default_dtype) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, OpDTypes, ) from torch.testing._internal.common_methods_invocations import ( op_db, ) from torch.testing._internal.common_device_type import ( ops, ) from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input import torch._prims as prims from torch._prims_common import CUDARngStateHelper from torch._prims.executor import make_traced import torch._refs as refs import scipy.special NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor" GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition" from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode from torch.fx.experimental.proxy_tensor import make_fx from torch._prims.context import TorchRefsMode
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_proxy_tensor.py
xfail
def xfail(op_name, variant_name='', *, device_type=None, dtypes=None): return (op_name, variant_name, device_type, dtypes, True)
from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS, xfail_inherited_tests import torch import unittest import warnings import operator from collections.abc import Iterable from torch.testing._internal.common_device_type import instantiate_device_type_tests from torch.testing._internal.common_methods_invocations import DecorateInfo from torch.testing._internal.common_methods_invocations import op_db, wrapper_set_seed from torch._subclasses.fake_tensor import DynamicOutputShapeException, DataDependentOutputException from torch._decomp import decomposition_table from torch.fx.experimental.symbolic_shapes import ( sym_float, eval_guards, bind_symbols, fx_placeholder_vals, fx_placeholder_targets ) from torch.testing._internal.common_device_type import ops from torch._C import _disabled_torch_function_impl from torch.fx.experimental.proxy_tensor import make_fx, DecompositionInterpreter, get_isolated_graphmodule from torch.utils._pytree import tree_map from torch import nn import re import functools import itertools aten = torch.ops.aten import sympy # noqa: F401 skipIfNoSympy = unittest.skipIf(not HAS_SYMPY, "no sympy") HAS_CUDA = torch.cuda.is_available() import torchvision from torch.testing._internal.logging_tensor import LoggingTensorMode from torch.testing._internal.logging_tensor import LoggingTensor import pickle import torch.library from torch.library import Library from torch._functorch.compilers import DebugInterpreter from torch._dynamo.source import LocalSource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_proxy_tensor.py
skip
def skip(op_name, variant_name='', *, device_type=None, dtypes=None): return (op_name, variant_name, device_type, dtypes, False)
from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS, xfail_inherited_tests import torch import unittest import warnings import operator from collections.abc import Iterable from torch.testing._internal.common_device_type import instantiate_device_type_tests from torch.testing._internal.common_methods_invocations import DecorateInfo from torch.testing._internal.common_methods_invocations import op_db, wrapper_set_seed from torch._subclasses.fake_tensor import DynamicOutputShapeException, DataDependentOutputException from torch._decomp import decomposition_table from torch.fx.experimental.symbolic_shapes import ( sym_float, eval_guards, bind_symbols, fx_placeholder_vals, fx_placeholder_targets ) from torch.testing._internal.common_device_type import ops from torch._C import _disabled_torch_function_impl from torch.fx.experimental.proxy_tensor import make_fx, DecompositionInterpreter, get_isolated_graphmodule from torch.utils._pytree import tree_map from torch import nn import re import functools import itertools aten = torch.ops.aten import sympy # noqa: F401 skipIfNoSympy = unittest.skipIf(not HAS_SYMPY, "no sympy") HAS_CUDA = torch.cuda.is_available() import torchvision from torch.testing._internal.logging_tensor import LoggingTensorMode from torch.testing._internal.logging_tensor import LoggingTensor import pickle import torch.library from torch.library import Library from torch._functorch.compilers import DebugInterpreter from torch._dynamo.source import LocalSource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_proxy_tensor.py
skipOps
def skipOps(test_case_name, base_test_name, to_skip): all_opinfos = op_db for xfail in to_skip: op_name, variant_name, device_type, dtypes, expected_failure = xfail matching_opinfos = [o for o in all_opinfos if o.name == op_name and o.variant_test_name == variant_name] assert len(matching_opinfos) >= 1, f"Couldn't find OpInfo for {xfail}" for opinfo in matching_opinfos: decorators = list(opinfo.decorators) if expected_failure: decorator = DecorateInfo(unittest.expectedFailure, test_case_name, base_test_name, device_type=device_type, dtypes=dtypes) decorators.append(decorator) else: decorator = DecorateInfo(unittest.skip("Skipped!"), test_case_name, base_test_name, device_type=device_type, dtypes=dtypes) decorators.append(decorator) opinfo.decorators = tuple(decorators) # This decorator doesn't modify fn in any way def wrapped(fn): return fn return wrapped USE_TORCHVISION = False try: import torchvision USE_TORCHVISION = True except ImportError: warnings.warn("Couldn't import torchvision. Some of our tests use it, try " "to install it with commands from pytorch.org, post-fixed with " "`--no-deps` to avoid overwriting the pytorch installation", UserWarning)
from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS, xfail_inherited_tests import torch import unittest import warnings import operator from collections.abc import Iterable from torch.testing._internal.common_device_type import instantiate_device_type_tests from torch.testing._internal.common_methods_invocations import DecorateInfo from torch.testing._internal.common_methods_invocations import op_db, wrapper_set_seed from torch._subclasses.fake_tensor import DynamicOutputShapeException, DataDependentOutputException from torch._decomp import decomposition_table from torch.fx.experimental.symbolic_shapes import ( sym_float, eval_guards, bind_symbols, fx_placeholder_vals, fx_placeholder_targets ) from torch.testing._internal.common_device_type import ops from torch._C import _disabled_torch_function_impl from torch.fx.experimental.proxy_tensor import make_fx, DecompositionInterpreter, get_isolated_graphmodule from torch.utils._pytree import tree_map from torch import nn import re import functools import itertools aten = torch.ops.aten import sympy # noqa: F401 skipIfNoSympy = unittest.skipIf(not HAS_SYMPY, "no sympy") HAS_CUDA = torch.cuda.is_available() import torchvision from torch.testing._internal.logging_tensor import LoggingTensorMode from torch.testing._internal.logging_tensor import LoggingTensor import pickle import torch.library from torch.library import Library from torch._functorch.compilers import DebugInterpreter from torch._dynamo.source import LocalSource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_proxy_tensor.py
wrapped
def wrapped(fn): return fn return wrapped
USE_TORCHVISION = False try: import torchvision USE_TORCHVISION = True except ImportError: warnings.warn("Couldn't import torchvision. Some of our tests use it, try " "to install it with commands from pytorch.org, post-fixed with " "`--no-deps` to avoid overwriting the pytorch installation", UserWarning)
from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS, xfail_inherited_tests import torch import unittest import warnings import operator from collections.abc import Iterable from torch.testing._internal.common_device_type import instantiate_device_type_tests from torch.testing._internal.common_methods_invocations import DecorateInfo from torch.testing._internal.common_methods_invocations import op_db, wrapper_set_seed from torch._subclasses.fake_tensor import DynamicOutputShapeException, DataDependentOutputException from torch._decomp import decomposition_table from torch.fx.experimental.symbolic_shapes import ( sym_float, eval_guards, bind_symbols, fx_placeholder_vals, fx_placeholder_targets ) from torch.testing._internal.common_device_type import ops from torch._C import _disabled_torch_function_impl from torch.fx.experimental.proxy_tensor import make_fx, DecompositionInterpreter, get_isolated_graphmodule from torch.utils._pytree import tree_map from torch import nn import re import functools import itertools aten = torch.ops.aten import sympy # noqa: F401 skipIfNoSympy = unittest.skipIf(not HAS_SYMPY, "no sympy") HAS_CUDA = torch.cuda.is_available() import torchvision from torch.testing._internal.logging_tensor import LoggingTensorMode from torch.testing._internal.logging_tensor import LoggingTensor import pickle import torch.library from torch.library import Library from torch._functorch.compilers import DebugInterpreter from torch._dynamo.source import LocalSource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_proxy_tensor.py
test_make_fx_model_fwd_bwd
def test_make_fx_model_fwd_bwd(self): class Foo(torch.nn.Module): def __init__(self): super().__init__() self.linear = torch.nn.Linear(5, 5) def forward(self, x): return self.linear(x).relu() model = Foo() def f(x, params): out = torch.func.functional_call(model, params, x).sum() out.backward() return list(params.values()) input = torch.randn(3, 5, requires_grad=True) params = dict(model.named_parameters()) fx_f = make_fx(f, tracing_mode=self.tracing_mode)(input, params) # fx may change the order of parameters in list, so using set() to compare self.assertTrue( torch.allclose(fx_f(input, params)[0], f(input, params)[0]) or torch.allclose(fx_f(input, params)[0], f(input, params)[1]) ) self.assertTrue( torch.allclose(fx_f(input, params)[1], f(input, params)[0]) or torch.allclose(fx_f(input, params)[1], f(input, params)[1]) )
def test_make_fx_model_fwd_bwd(self): class Foo(torch.nn.Module): def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(5, 5) def forward(self, x): return self.linear(x).relu() model = Foo() def f(x, params): out = torch.func.functional_call(model, params, x).sum() out.backward() return list(params.values()) input = torch.randn(3, 5, requires_grad=True) params = dict(model.named_parameters()) fx_f = make_fx(f, tracing_mode=self.tracing_mode)(input, params) # fx may change the order of parameters in list, so using set() to compare self.assertTrue( torch.allclose(fx_f(input, params)[0], f(input, params)[0]) or torch.allclose(fx_f(input, params)[0], f(input, params)[1]) ) self.assertTrue( torch.allclose(fx_f(input, params)[1], f(input, params)[0]) or torch.allclose(fx_f(input, params)[1], f(input, params)[1]) )
from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS, xfail_inherited_tests import torch import unittest import warnings import operator from collections.abc import Iterable from torch.testing._internal.common_device_type import instantiate_device_type_tests from torch.testing._internal.common_methods_invocations import DecorateInfo from torch.testing._internal.common_methods_invocations import op_db, wrapper_set_seed from torch._subclasses.fake_tensor import DynamicOutputShapeException, DataDependentOutputException from torch._decomp import decomposition_table from torch.fx.experimental.symbolic_shapes import ( sym_float, eval_guards, bind_symbols, fx_placeholder_vals, fx_placeholder_targets ) from torch.testing._internal.common_device_type import ops from torch._C import _disabled_torch_function_impl from torch.fx.experimental.proxy_tensor import make_fx, DecompositionInterpreter, get_isolated_graphmodule from torch.utils._pytree import tree_map from torch import nn import re import functools import itertools aten = torch.ops.aten import sympy # noqa: F401 skipIfNoSympy = unittest.skipIf(not HAS_SYMPY, "no sympy") HAS_CUDA = torch.cuda.is_available() USE_TORCHVISION = False import torchvision class TestGenericProxyTensor(TestCase): from torch.testing._internal.logging_tensor import LoggingTensorMode from torch.testing._internal.logging_tensor import LoggingTensor import pickle import torch.library from torch.library import Library from torch._functorch.compilers import DebugInterpreter from torch._dynamo.source import LocalSource
from torch.testing._internal.common_utils import TestCase, run_tests import torch import torch._dynamo import unittest import warnings import operator from collections.abc import Iterable from torch.nn.utils import stateless from torch.testing._internal.common_device_type import instantiate_device_type_tests from torch.testing._internal.common_methods_invocations import op_db, skip, xfail, skipOps from torch._subclasses.fake_tensor import DynamicOutputShapeException, DataDependentOutputException, FakeTensorMode from torch._subclasses.functional_tensor import FunctionalTensor, FunctionalTensorMode from torch._decomp import decomposition_table from torch.fx.experimental.symbolic_shapes import ( eval_guards, bind_symbols, fx_placeholder_vals, fx_placeholder_targets, guard_int, GuardOnDataDependentSymNode ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.hop_db import hop_db from torch.testing._internal.common_device_type import ops import torch.testing._internal.optests as optests from torch._C import _disabled_torch_function_impl from torch.fx.experimental.proxy_tensor import make_fx, DecompositionInterpreter, get_isolated_graphmodule from torch.utils._pytree import tree_map from torch.fx.passes.runtime_assert import insert_deferred_runtime_asserts from torch import nn import torch._functorch.config import re import functools import itertools aten = torch.ops.aten HAS_CUDA = torch.cuda.is_available() USE_TORCHVISION = False import torchvision class TestGenericProxyTensor(TestCase): from torch._dispatch.python import enable_python_dispatcher from torch.testing._internal.logging_tensor import LoggingTensorMode from torch.testing._internal.logging_tensor import LoggingTensor import pickle from torch._dispatch.python import enable_python_dispatcher from torch._dispatch.python import enable_python_dispatcher import torch.library from torch.library import Library from torch._functorch.compilers import DebugInterpreter from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch._dynamo.source import LocalSource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_proxy_tensor.py
__init__
def __init__(self): super().__init__() self.linear = torch.nn.Linear(5, 5)
def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(5, 5)
from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS, xfail_inherited_tests import torch import unittest import warnings import operator from collections.abc import Iterable from torch.testing._internal.common_device_type import instantiate_device_type_tests from torch.testing._internal.common_methods_invocations import DecorateInfo from torch.testing._internal.common_methods_invocations import op_db, wrapper_set_seed from torch._subclasses.fake_tensor import DynamicOutputShapeException, DataDependentOutputException from torch._decomp import decomposition_table from torch.fx.experimental.symbolic_shapes import ( sym_float, eval_guards, bind_symbols, fx_placeholder_vals, fx_placeholder_targets ) from torch.testing._internal.common_device_type import ops from torch._C import _disabled_torch_function_impl from torch.fx.experimental.proxy_tensor import make_fx, DecompositionInterpreter, get_isolated_graphmodule from torch.utils._pytree import tree_map from torch import nn import re import functools import itertools aten = torch.ops.aten import sympy # noqa: F401 skipIfNoSympy = unittest.skipIf(not HAS_SYMPY, "no sympy") HAS_CUDA = torch.cuda.is_available() USE_TORCHVISION = False import torchvision from torch.testing._internal.logging_tensor import LoggingTensorMode from torch.testing._internal.logging_tensor import LoggingTensor import pickle class Foo(torch.nn.Module): import torch.library from torch.library import Library from torch._functorch.compilers import DebugInterpreter from torch._dynamo.source import LocalSource
from torch.testing._internal.common_utils import TestCase, run_tests import torch import torch._dynamo import unittest import warnings import operator from collections.abc import Iterable from torch.nn.utils import stateless from torch.testing._internal.common_device_type import instantiate_device_type_tests from torch.testing._internal.common_methods_invocations import op_db, skip, xfail, skipOps from torch._subclasses.fake_tensor import DynamicOutputShapeException, DataDependentOutputException, FakeTensorMode from torch._subclasses.functional_tensor import FunctionalTensor, FunctionalTensorMode from torch._decomp import decomposition_table from torch.fx.experimental.symbolic_shapes import ( eval_guards, bind_symbols, fx_placeholder_vals, fx_placeholder_targets, guard_int, GuardOnDataDependentSymNode ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.hop_db import hop_db from torch.testing._internal.common_device_type import ops import torch.testing._internal.optests as optests from torch._C import _disabled_torch_function_impl from torch.fx.experimental.proxy_tensor import make_fx, DecompositionInterpreter, get_isolated_graphmodule from torch.utils._pytree import tree_map from torch.fx.passes.runtime_assert import insert_deferred_runtime_asserts from torch import nn import torch._functorch.config import re import functools import itertools aten = torch.ops.aten HAS_CUDA = torch.cuda.is_available() USE_TORCHVISION = False import torchvision from torch._dispatch.python import enable_python_dispatcher from torch.testing._internal.logging_tensor import LoggingTensorMode from torch.testing._internal.logging_tensor import LoggingTensor import pickle from torch._dispatch.python import enable_python_dispatcher from torch._dispatch.python import enable_python_dispatcher class Foo(torch.nn.Module): import torch.library from torch.library import Library from torch._functorch.compilers import DebugInterpreter from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch._dynamo.source import LocalSource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_proxy_tensor.py
test_make_fx_model_fwd_bwd_wgtupdate
def test_make_fx_model_fwd_bwd_wgtupdate(self): class Foo(torch.nn.Module): def __init__(self): super().__init__() self.linear = torch.nn.Linear(5, 5) def forward(self, x): return self.linear(x).relu() model = Foo() def f(args, params, buffers): for p in params.values(): p.grad = None if not isinstance(args, Iterable): args = [args] params_and_buffers = {**params, **buffers} out = torch.func.functional_call(model, params_and_buffers, args) out.sum().backward() return [p - 1e-4 * p.grad for p in params.values()] input = torch.randn(3, 5, requires_grad=True) params = dict(model.named_parameters()) buffers = dict(model.named_buffers()) fx_f = make_fx(f, tracing_mode=self.tracing_mode)(input, params, buffers) # fx may change the order of parameters in list, so using set() to compare # also there is a numerical difference in results so changing atol from 1e-08 to 1e-03 self.assertTrue( torch.allclose(fx_f(input, params, buffers)[0], f(input, params, buffers)[0], atol=1e-03) or torch.allclose(fx_f(input, params, buffers)[0], f(input, params, buffers)[1], atol=1e-03) ) self.assertTrue( torch.allclose(fx_f(input, params, buffers)[1], f(input, params, buffers)[0], atol=1e-03) or torch.allclose(fx_f(input, params, buffers)[1], f(input, params, buffers)[1], atol=1e-03) )
def test_make_fx_model_fwd_bwd_wgtupdate(self): class Foo(torch.nn.Module): def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(5, 5) def forward(self, x): return self.linear(x).relu() model = Foo() def f(args, params, buffers): for p in params.values(): p.grad = None if not isinstance(args, Iterable): args = [args] params_and_buffers = {**params, **buffers} out = torch.func.functional_call(model, params_and_buffers, args) out.sum().backward() return [p - 1e-4 * p.grad for p in params.values()] input = torch.randn(3, 5, requires_grad=True) params = dict(model.named_parameters()) buffers = dict(model.named_buffers()) fx_f = make_fx(f, tracing_mode=self.tracing_mode)(input, params, buffers) # fx may change the order of parameters in list, so using set() to compare # also there is a numerical difference in results so changing atol from 1e-08 to 1e-03 self.assertTrue( torch.allclose(fx_f(input, params, buffers)[0], f(input, params, buffers)[0], atol=1e-03) or torch.allclose(fx_f(input, params, buffers)[0], f(input, params, buffers)[1], atol=1e-03) ) self.assertTrue( torch.allclose(fx_f(input, params, buffers)[1], f(input, params, buffers)[0], atol=1e-03) or torch.allclose(fx_f(input, params, buffers)[1], f(input, params, buffers)[1], atol=1e-03) )
from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS, xfail_inherited_tests import torch import unittest import warnings import operator from collections.abc import Iterable from torch.testing._internal.common_device_type import instantiate_device_type_tests from torch.testing._internal.common_methods_invocations import DecorateInfo from torch.testing._internal.common_methods_invocations import op_db, wrapper_set_seed from torch._subclasses.fake_tensor import DynamicOutputShapeException, DataDependentOutputException from torch._decomp import decomposition_table from torch.fx.experimental.symbolic_shapes import ( sym_float, eval_guards, bind_symbols, fx_placeholder_vals, fx_placeholder_targets ) from torch.testing._internal.common_device_type import ops from torch._C import _disabled_torch_function_impl from torch.fx.experimental.proxy_tensor import make_fx, DecompositionInterpreter, get_isolated_graphmodule from torch.utils._pytree import tree_map from torch import nn import re import functools import itertools aten = torch.ops.aten import sympy # noqa: F401 skipIfNoSympy = unittest.skipIf(not HAS_SYMPY, "no sympy") HAS_CUDA = torch.cuda.is_available() USE_TORCHVISION = False import torchvision class TestGenericProxyTensor(TestCase): from torch.testing._internal.logging_tensor import LoggingTensorMode from torch.testing._internal.logging_tensor import LoggingTensor import pickle import torch.library from torch.library import Library from torch._functorch.compilers import DebugInterpreter from torch._dynamo.source import LocalSource
from torch.testing._internal.common_utils import TestCase, run_tests import torch import torch._dynamo import unittest import warnings import operator from collections.abc import Iterable from torch.nn.utils import stateless from torch.testing._internal.common_device_type import instantiate_device_type_tests from torch.testing._internal.common_methods_invocations import op_db, skip, xfail, skipOps from torch._subclasses.fake_tensor import DynamicOutputShapeException, DataDependentOutputException, FakeTensorMode from torch._subclasses.functional_tensor import FunctionalTensor, FunctionalTensorMode from torch._decomp import decomposition_table from torch.fx.experimental.symbolic_shapes import ( eval_guards, bind_symbols, fx_placeholder_vals, fx_placeholder_targets, guard_int, GuardOnDataDependentSymNode ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.hop_db import hop_db from torch.testing._internal.common_device_type import ops import torch.testing._internal.optests as optests from torch._C import _disabled_torch_function_impl from torch.fx.experimental.proxy_tensor import make_fx, DecompositionInterpreter, get_isolated_graphmodule from torch.utils._pytree import tree_map from torch.fx.passes.runtime_assert import insert_deferred_runtime_asserts from torch import nn import torch._functorch.config import re import functools import itertools aten = torch.ops.aten HAS_CUDA = torch.cuda.is_available() USE_TORCHVISION = False import torchvision class TestGenericProxyTensor(TestCase): from torch._dispatch.python import enable_python_dispatcher from torch.testing._internal.logging_tensor import LoggingTensorMode from torch.testing._internal.logging_tensor import LoggingTensor import pickle from torch._dispatch.python import enable_python_dispatcher from torch._dispatch.python import enable_python_dispatcher import torch.library from torch.library import Library from torch._functorch.compilers import DebugInterpreter from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch._dynamo.source import LocalSource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_proxy_tensor.py
__init__
def __init__(self): super().__init__() self.linear = torch.nn.Linear(5, 5)
def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(5, 5)
from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS, xfail_inherited_tests import torch import unittest import warnings import operator from collections.abc import Iterable from torch.testing._internal.common_device_type import instantiate_device_type_tests from torch.testing._internal.common_methods_invocations import DecorateInfo from torch.testing._internal.common_methods_invocations import op_db, wrapper_set_seed from torch._subclasses.fake_tensor import DynamicOutputShapeException, DataDependentOutputException from torch._decomp import decomposition_table from torch.fx.experimental.symbolic_shapes import ( sym_float, eval_guards, bind_symbols, fx_placeholder_vals, fx_placeholder_targets ) from torch.testing._internal.common_device_type import ops from torch._C import _disabled_torch_function_impl from torch.fx.experimental.proxy_tensor import make_fx, DecompositionInterpreter, get_isolated_graphmodule from torch.utils._pytree import tree_map from torch import nn import re import functools import itertools aten = torch.ops.aten import sympy # noqa: F401 skipIfNoSympy = unittest.skipIf(not HAS_SYMPY, "no sympy") HAS_CUDA = torch.cuda.is_available() USE_TORCHVISION = False import torchvision from torch.testing._internal.logging_tensor import LoggingTensorMode from torch.testing._internal.logging_tensor import LoggingTensor import pickle class Foo(torch.nn.Module): import torch.library from torch.library import Library from torch._functorch.compilers import DebugInterpreter from torch._dynamo.source import LocalSource
from torch.testing._internal.common_utils import TestCase, run_tests import torch import torch._dynamo import unittest import warnings import operator from collections.abc import Iterable from torch.nn.utils import stateless from torch.testing._internal.common_device_type import instantiate_device_type_tests from torch.testing._internal.common_methods_invocations import op_db, skip, xfail, skipOps from torch._subclasses.fake_tensor import DynamicOutputShapeException, DataDependentOutputException, FakeTensorMode from torch._subclasses.functional_tensor import FunctionalTensor, FunctionalTensorMode from torch._decomp import decomposition_table from torch.fx.experimental.symbolic_shapes import ( eval_guards, bind_symbols, fx_placeholder_vals, fx_placeholder_targets, guard_int, GuardOnDataDependentSymNode ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.hop_db import hop_db from torch.testing._internal.common_device_type import ops import torch.testing._internal.optests as optests from torch._C import _disabled_torch_function_impl from torch.fx.experimental.proxy_tensor import make_fx, DecompositionInterpreter, get_isolated_graphmodule from torch.utils._pytree import tree_map from torch.fx.passes.runtime_assert import insert_deferred_runtime_asserts from torch import nn import torch._functorch.config import re import functools import itertools aten = torch.ops.aten HAS_CUDA = torch.cuda.is_available() USE_TORCHVISION = False import torchvision from torch._dispatch.python import enable_python_dispatcher from torch.testing._internal.logging_tensor import LoggingTensorMode from torch.testing._internal.logging_tensor import LoggingTensor import pickle from torch._dispatch.python import enable_python_dispatcher from torch._dispatch.python import enable_python_dispatcher class Foo(torch.nn.Module): import torch.library from torch.library import Library from torch._functorch.compilers import DebugInterpreter from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch._dynamo.source import LocalSource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_proxy_tensor.py
test_issue82547
def test_issue82547(self): x = nn.Parameter(torch.randn(3, 3)) def f(): return torch.ops.aten.t.default(x) self.assertRaisesRegex(Exception, "Please convert all Tensors", lambda: make_fx(f, tracing_mode="fake")()) class A(torch.Tensor): pass x = A(torch.randn(3, 3)) self.assertRaisesRegex(TypeError, "no implementation found", lambda: make_fx(f, tracing_mode="fake")())
def test_issue82547(self): x = nn.Parameter(torch.randn(3, 3)) def f(): return torch.ops.aten.t.default(x) self.assertRaisesRegex(Exception, "Please convert all Tensors", lambda: make_fx(f, tracing_mode="fake")()) class A(torch.Tensor): pass x = A(torch.randn(3, 3)) self.assertRaisesRegex(TypeError, "Multiple dispatch failed", lambda: make_fx(f, tracing_mode="fake")())
from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS, xfail_inherited_tests import torch import unittest import warnings import operator from collections.abc import Iterable from torch.testing._internal.common_device_type import instantiate_device_type_tests from torch.testing._internal.common_methods_invocations import DecorateInfo from torch.testing._internal.common_methods_invocations import op_db, wrapper_set_seed from torch._subclasses.fake_tensor import DynamicOutputShapeException, DataDependentOutputException from torch._decomp import decomposition_table from torch.fx.experimental.symbolic_shapes import ( sym_float, eval_guards, bind_symbols, fx_placeholder_vals, fx_placeholder_targets ) from torch.testing._internal.common_device_type import ops from torch._C import _disabled_torch_function_impl from torch.fx.experimental.proxy_tensor import make_fx, DecompositionInterpreter, get_isolated_graphmodule from torch.utils._pytree import tree_map from torch import nn import re import functools import itertools aten = torch.ops.aten import sympy # noqa: F401 skipIfNoSympy = unittest.skipIf(not HAS_SYMPY, "no sympy") HAS_CUDA = torch.cuda.is_available() USE_TORCHVISION = False import torchvision from torch.testing._internal.logging_tensor import LoggingTensorMode from torch.testing._internal.logging_tensor import LoggingTensor import pickle class TestFakeProxyTensor(TestCase): import torch.library from torch.library import Library from torch._functorch.compilers import DebugInterpreter from torch._dynamo.source import LocalSource
from torch.testing._internal.common_utils import TestCase, run_tests import torch import torch._dynamo import unittest import warnings import operator from collections.abc import Iterable from torch.nn.utils import stateless from torch.testing._internal.common_device_type import instantiate_device_type_tests from torch.testing._internal.common_methods_invocations import op_db, skip, xfail, skipOps from torch._subclasses.fake_tensor import DynamicOutputShapeException, DataDependentOutputException, FakeTensorMode from torch._subclasses.functional_tensor import FunctionalTensor, FunctionalTensorMode from torch._decomp import decomposition_table from torch.fx.experimental.symbolic_shapes import ( eval_guards, bind_symbols, fx_placeholder_vals, fx_placeholder_targets, guard_int, GuardOnDataDependentSymNode ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.hop_db import hop_db from torch.testing._internal.common_device_type import ops import torch.testing._internal.optests as optests from torch._C import _disabled_torch_function_impl from torch.fx.experimental.proxy_tensor import make_fx, DecompositionInterpreter, get_isolated_graphmodule from torch.utils._pytree import tree_map from torch.fx.passes.runtime_assert import insert_deferred_runtime_asserts from torch import nn import torch._functorch.config import re import functools import itertools aten = torch.ops.aten HAS_CUDA = torch.cuda.is_available() USE_TORCHVISION = False import torchvision from torch._dispatch.python import enable_python_dispatcher from torch.testing._internal.logging_tensor import LoggingTensorMode from torch.testing._internal.logging_tensor import LoggingTensor import pickle from torch._dispatch.python import enable_python_dispatcher from torch._dispatch.python import enable_python_dispatcher class TestFakeProxyTensor(TestCase): import torch.library from torch.library import Library from torch._functorch.compilers import DebugInterpreter from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch._dynamo.source import LocalSource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_proxy_tensor.py
test_fused_adam
def test_fused_adam(self): # See https://github.com/pytorch/pytorch/issues/99356 params = [torch.randn(10, 10) for _ in range(10)] grads = [torch.randn(10, 10) for _ in range(10)] exp_avgs = [torch.randn(10, 10) for _ in range(10)] exp_avg_sqs = [torch.randn(10, 10) for _ in range(10)] max_exp_avg_sqs = [torch.randn(10, 10) for _ in range(10)] state_steps = [torch.tensor(0) for _ in range(10)] def fused_adam(params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps): (new_params, _, _, _, _) = aten._fused_adam.default( params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr=0.1, beta1=0.9, beta2=0.999, weight_decay=0.01, eps=1e-8, amsgrad=False, maximize=False, ) for p, new_p in zip(params, new_params): p.copy_(new_p) return params gm = make_fx(fused_adam, tracing_mode='fake')( params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, ) ensure_ops_have_val = [aten._fused_adam.default, operator.getitem] for n in gm.graph.nodes: if n.op == "call_function" and n.target in ensure_ops_have_val: self.assertIn('val', n.meta)
from torch.testing._internal.common_utils import TestCase, run_tests import torch import torch._dynamo import unittest import warnings import operator from collections.abc import Iterable from torch.nn.utils import stateless from torch.testing._internal.common_device_type import instantiate_device_type_tests from torch.testing._internal.common_methods_invocations import op_db, skip, xfail, skipOps from torch._subclasses.fake_tensor import DynamicOutputShapeException, DataDependentOutputException, FakeTensorMode from torch._subclasses.functional_tensor import FunctionalTensor, FunctionalTensorMode from torch._decomp import decomposition_table from torch.fx.experimental.symbolic_shapes import ( eval_guards, bind_symbols, fx_placeholder_vals, fx_placeholder_targets, guard_int, GuardOnDataDependentSymNode ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.hop_db import hop_db from torch.testing._internal.common_device_type import ops import torch.testing._internal.optests as optests from torch._C import _disabled_torch_function_impl from torch.fx.experimental.proxy_tensor import make_fx, DecompositionInterpreter, get_isolated_graphmodule from torch.utils._pytree import tree_map from torch.fx.passes.runtime_assert import insert_deferred_runtime_asserts from torch import nn import torch._functorch.config import re import functools import itertools aten = torch.ops.aten HAS_CUDA = torch.cuda.is_available() USE_TORCHVISION = False import torchvision from torch._dispatch.python import enable_python_dispatcher from torch.testing._internal.logging_tensor import LoggingTensorMode from torch.testing._internal.logging_tensor import LoggingTensor import pickle from torch._dispatch.python import enable_python_dispatcher from torch._dispatch.python import enable_python_dispatcher class TestFakeProxyTensor(TestCase): import torch.library from torch.library import Library from torch._functorch.compilers import DebugInterpreter from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch._dynamo.source import LocalSource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_proxy_tensor.py
fused_adam
def fused_adam(params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps): (new_params, _, _, _, _) = aten._fused_adam.default( params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr=0.1, beta1=0.9, beta2=0.999, weight_decay=0.01, eps=1e-8, amsgrad=False, maximize=False, ) for p, new_p in zip(params, new_params): p.copy_(new_p) return params gm = make_fx(fused_adam, tracing_mode='fake')( params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, ) ensure_ops_have_val = [aten._fused_adam.default, operator.getitem] for n in gm.graph.nodes: if n.op == "call_function" and n.target in ensure_ops_have_val: self.assertIn('val', n.meta)
from torch.testing._internal.common_utils import TestCase, run_tests import torch import torch._dynamo import unittest import warnings import operator from collections.abc import Iterable from torch.nn.utils import stateless from torch.testing._internal.common_device_type import instantiate_device_type_tests from torch.testing._internal.common_methods_invocations import op_db, skip, xfail, skipOps from torch._subclasses.fake_tensor import DynamicOutputShapeException, DataDependentOutputException, FakeTensorMode from torch._subclasses.functional_tensor import FunctionalTensor, FunctionalTensorMode from torch._decomp import decomposition_table from torch.fx.experimental.symbolic_shapes import ( eval_guards, bind_symbols, fx_placeholder_vals, fx_placeholder_targets, guard_int, GuardOnDataDependentSymNode ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.hop_db import hop_db from torch.testing._internal.common_device_type import ops import torch.testing._internal.optests as optests from torch._C import _disabled_torch_function_impl from torch.fx.experimental.proxy_tensor import make_fx, DecompositionInterpreter, get_isolated_graphmodule from torch.utils._pytree import tree_map from torch.fx.passes.runtime_assert import insert_deferred_runtime_asserts from torch import nn import torch._functorch.config import re import functools import itertools aten = torch.ops.aten HAS_CUDA = torch.cuda.is_available() USE_TORCHVISION = False import torchvision from torch._dispatch.python import enable_python_dispatcher from torch.testing._internal.logging_tensor import LoggingTensorMode from torch.testing._internal.logging_tensor import LoggingTensor import pickle from torch._dispatch.python import enable_python_dispatcher from torch._dispatch.python import enable_python_dispatcher import torch.library from torch.library import Library from torch._functorch.compilers import DebugInterpreter from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch._dynamo.source import LocalSource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_proxy_tensor.py
addmm
def addmm(a, b, c, beta=1, alpha=1): if beta == 1 and alpha == 1: return NotImplemented return beta * a + alpha * (b @ c) decomposed_fx = make_fx(f, {aten.addmm.default: addmm})(*inps) self.assertEqual(fx_g(*inps), decomposed_fx(*inps)) self.assertEqual(len([n for n in fx_g.graph.nodes if n.target == aten.addmm.default]), 2) self.assertEqual(len([n for n in decomposed_fx.graph.nodes if n.target == aten.addmm.default]), 1)
def addmm(a, b, c, beta=1, alpha=1): if beta == 1 and alpha == 1: return NotImplemented return beta * a + alpha * (b @ c) decomposed_fx = make_fx(f, decomposition_table={aten.addmm.default: addmm})(*inps) self.assertEqual(fx_g(*inps), decomposed_fx(*inps)) self.assertEqual(len([n for n in fx_g.graph.nodes if n.target == aten.addmm.default]), 2) self.assertEqual(len([n for n in decomposed_fx.graph.nodes if n.target == aten.addmm.default]), 1)
from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS, xfail_inherited_tests import torch import unittest import warnings import operator from collections.abc import Iterable from torch.testing._internal.common_device_type import instantiate_device_type_tests from torch.testing._internal.common_methods_invocations import DecorateInfo from torch.testing._internal.common_methods_invocations import op_db, wrapper_set_seed from torch._subclasses.fake_tensor import DynamicOutputShapeException, DataDependentOutputException from torch._decomp import decomposition_table from torch.fx.experimental.symbolic_shapes import ( sym_float, eval_guards, bind_symbols, fx_placeholder_vals, fx_placeholder_targets ) from torch.testing._internal.common_device_type import ops from torch._C import _disabled_torch_function_impl from torch.fx.experimental.proxy_tensor import make_fx, DecompositionInterpreter, get_isolated_graphmodule from torch.utils._pytree import tree_map from torch import nn import re import functools import itertools aten = torch.ops.aten import sympy # noqa: F401 skipIfNoSympy = unittest.skipIf(not HAS_SYMPY, "no sympy") HAS_CUDA = torch.cuda.is_available() USE_TORCHVISION = False import torchvision from torch.testing._internal.logging_tensor import LoggingTensorMode from torch.testing._internal.logging_tensor import LoggingTensor import pickle import torch.library from torch.library import Library from torch._functorch.compilers import DebugInterpreter from torch._dynamo.source import LocalSource
from torch.testing._internal.common_utils import TestCase, run_tests import torch import torch._dynamo import unittest import warnings import operator from collections.abc import Iterable from torch.nn.utils import stateless from torch.testing._internal.common_device_type import instantiate_device_type_tests from torch.testing._internal.common_methods_invocations import op_db, skip, xfail, skipOps from torch._subclasses.fake_tensor import DynamicOutputShapeException, DataDependentOutputException, FakeTensorMode from torch._subclasses.functional_tensor import FunctionalTensor, FunctionalTensorMode from torch._decomp import decomposition_table from torch.fx.experimental.symbolic_shapes import ( eval_guards, bind_symbols, fx_placeholder_vals, fx_placeholder_targets, guard_int, GuardOnDataDependentSymNode ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.hop_db import hop_db from torch.testing._internal.common_device_type import ops import torch.testing._internal.optests as optests from torch._C import _disabled_torch_function_impl from torch.fx.experimental.proxy_tensor import make_fx, DecompositionInterpreter, get_isolated_graphmodule from torch.utils._pytree import tree_map from torch.fx.passes.runtime_assert import insert_deferred_runtime_asserts from torch import nn import torch._functorch.config import re import functools import itertools aten = torch.ops.aten HAS_CUDA = torch.cuda.is_available() USE_TORCHVISION = False import torchvision from torch._dispatch.python import enable_python_dispatcher from torch.testing._internal.logging_tensor import LoggingTensorMode from torch.testing._internal.logging_tensor import LoggingTensor import pickle from torch._dispatch.python import enable_python_dispatcher from torch._dispatch.python import enable_python_dispatcher import torch.library from torch.library import Library from torch._functorch.compilers import DebugInterpreter from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch._dynamo.source import LocalSource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_proxy_tensor.py
test_non_symint_size_spec
def test_non_symint_size_spec(self): # this isn't really a proxy tensor test, but it's the most convenient # way to get a fake tensor with symbolic sizes def f(x): torch._C._non_sym_sizes(x) return x + 1 x = torch.randn(2, 3) make_fx(f, tracing_mode="symbolic")(x) # https://github.com/pytorch/pytorch/issues/108195
from torch.testing._internal.common_utils import TestCase, run_tests import torch import torch._dynamo import unittest import warnings import operator from collections.abc import Iterable from torch.nn.utils import stateless from torch.testing._internal.common_device_type import instantiate_device_type_tests from torch.testing._internal.common_methods_invocations import op_db, skip, xfail, skipOps from torch._subclasses.fake_tensor import DynamicOutputShapeException, DataDependentOutputException, FakeTensorMode from torch._subclasses.functional_tensor import FunctionalTensor, FunctionalTensorMode from torch._decomp import decomposition_table from torch.fx.experimental.symbolic_shapes import ( eval_guards, bind_symbols, fx_placeholder_vals, fx_placeholder_targets, guard_int, GuardOnDataDependentSymNode ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.hop_db import hop_db from torch.testing._internal.common_device_type import ops import torch.testing._internal.optests as optests from torch._C import _disabled_torch_function_impl from torch.fx.experimental.proxy_tensor import make_fx, DecompositionInterpreter, get_isolated_graphmodule from torch.utils._pytree import tree_map from torch.fx.passes.runtime_assert import insert_deferred_runtime_asserts from torch import nn import torch._functorch.config import re import functools import itertools aten = torch.ops.aten HAS_CUDA = torch.cuda.is_available() USE_TORCHVISION = False import torchvision from torch._dispatch.python import enable_python_dispatcher from torch.testing._internal.logging_tensor import LoggingTensorMode from torch.testing._internal.logging_tensor import LoggingTensor import pickle from torch._dispatch.python import enable_python_dispatcher from torch._dispatch.python import enable_python_dispatcher class TestSymbolicTracing(TestCase): import torch.library from torch.library import Library from torch._functorch.compilers import DebugInterpreter from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch._dynamo.source import LocalSource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_proxy_tensor.py
reflect_R_over_x
def reflect_R_over_x(R): reflect = torch.eye(3, device=R.device) reflect[0, 0] = -1 return reflect @ R @ reflect
from torch.testing._internal.common_utils import TestCase, run_tests import torch import torch._dynamo import unittest import warnings import operator from collections.abc import Iterable from torch.nn.utils import stateless from torch.testing._internal.common_device_type import instantiate_device_type_tests from torch.testing._internal.common_methods_invocations import op_db, skip, xfail, skipOps from torch._subclasses.fake_tensor import DynamicOutputShapeException, DataDependentOutputException, FakeTensorMode from torch._subclasses.functional_tensor import FunctionalTensor, FunctionalTensorMode from torch._decomp import decomposition_table from torch.fx.experimental.symbolic_shapes import ( eval_guards, bind_symbols, fx_placeholder_vals, fx_placeholder_targets, guard_int, GuardOnDataDependentSymNode ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.hop_db import hop_db from torch.testing._internal.common_device_type import ops import torch.testing._internal.optests as optests from torch._C import _disabled_torch_function_impl from torch.fx.experimental.proxy_tensor import make_fx, DecompositionInterpreter, get_isolated_graphmodule from torch.utils._pytree import tree_map from torch.fx.passes.runtime_assert import insert_deferred_runtime_asserts from torch import nn import torch._functorch.config import re import functools import itertools aten = torch.ops.aten HAS_CUDA = torch.cuda.is_available() USE_TORCHVISION = False import torchvision from torch._dispatch.python import enable_python_dispatcher from torch.testing._internal.logging_tensor import LoggingTensorMode from torch.testing._internal.logging_tensor import LoggingTensor import pickle from torch._dispatch.python import enable_python_dispatcher from torch._dispatch.python import enable_python_dispatcher import torch.library from torch.library import Library from torch._functorch.compilers import DebugInterpreter from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch._dynamo.source import LocalSource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_proxy_tensor.py
test_unbacked_batch_resnet
def test_unbacked_batch_resnet(self): mod = torchvision.models.resnet18() def f(x, mask, params, buffers): for p in itertools.chain([x, mask], params.values(), buffers.values()): for s in p.shape: guard_int(s) x = x[mask] torch._check(x.shape[0] >= 1) for p in params.values(): p.grad = None return torch.func.functional_call(mod, {**params, **buffers}, (x,)).sum() make_fx(f, tracing_mode="symbolic")( torch.randn(3, 3, 250, 250), torch.randint(0, 2, (3,), dtype=torch.bool), dict(mod.named_parameters()), dict(mod.named_buffers()), )
from torch.testing._internal.common_utils import TestCase, run_tests import torch import torch._dynamo import unittest import warnings import operator from collections.abc import Iterable from torch.nn.utils import stateless from torch.testing._internal.common_device_type import instantiate_device_type_tests from torch.testing._internal.common_methods_invocations import op_db, skip, xfail, skipOps from torch._subclasses.fake_tensor import DynamicOutputShapeException, DataDependentOutputException, FakeTensorMode from torch._subclasses.functional_tensor import FunctionalTensor, FunctionalTensorMode from torch._decomp import decomposition_table from torch.fx.experimental.symbolic_shapes import ( eval_guards, bind_symbols, fx_placeholder_vals, fx_placeholder_targets, guard_int, GuardOnDataDependentSymNode ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.hop_db import hop_db from torch.testing._internal.common_device_type import ops import torch.testing._internal.optests as optests from torch._C import _disabled_torch_function_impl from torch.fx.experimental.proxy_tensor import make_fx, DecompositionInterpreter, get_isolated_graphmodule from torch.utils._pytree import tree_map from torch.fx.passes.runtime_assert import insert_deferred_runtime_asserts from torch import nn import torch._functorch.config import re import functools import itertools aten = torch.ops.aten HAS_CUDA = torch.cuda.is_available() USE_TORCHVISION = False import torchvision from torch._dispatch.python import enable_python_dispatcher from torch.testing._internal.logging_tensor import LoggingTensorMode from torch.testing._internal.logging_tensor import LoggingTensor import pickle from torch._dispatch.python import enable_python_dispatcher from torch._dispatch.python import enable_python_dispatcher class TestSymbolicTracing(TestCase): import torch.library from torch.library import Library from torch._functorch.compilers import DebugInterpreter from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch._dynamo.source import LocalSource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_proxy_tensor.py
test_view_divisibility_unbacked_relatively_prime
def test_view_divisibility_unbacked_relatively_prime(self): # See https://github.com/pytorch/pytorch/issues/123651 def f(x): i0 = x.item() torch._check_is_size(i0) # To trigger the original issue, the max bound has to # be chosen such that 448 / 447 < 2 (which it is.) torch._check(i0 <= 448) return torch.zeros(256 * i0).view(-1, 447) make_fx(f, tracing_mode="symbolic")(torch.tensor(256 * 447, device="cuda"))
from torch.testing._internal.common_utils import TestCase, run_tests import torch import torch._dynamo import unittest import warnings import operator from collections.abc import Iterable from torch.nn.utils import stateless from torch.testing._internal.common_device_type import instantiate_device_type_tests from torch.testing._internal.common_methods_invocations import op_db, skip, xfail, skipOps from torch._subclasses.fake_tensor import DynamicOutputShapeException, DataDependentOutputException, FakeTensorMode from torch._subclasses.functional_tensor import FunctionalTensor, FunctionalTensorMode from torch._decomp import decomposition_table from torch.fx.experimental.symbolic_shapes import ( eval_guards, bind_symbols, fx_placeholder_vals, fx_placeholder_targets, guard_int, GuardOnDataDependentSymNode ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.hop_db import hop_db from torch.testing._internal.common_device_type import ops import torch.testing._internal.optests as optests from torch._C import _disabled_torch_function_impl from torch.fx.experimental.proxy_tensor import make_fx, DecompositionInterpreter, get_isolated_graphmodule from torch.utils._pytree import tree_map from torch.fx.passes.runtime_assert import insert_deferred_runtime_asserts from torch import nn import torch._functorch.config import re import functools import itertools aten = torch.ops.aten HAS_CUDA = torch.cuda.is_available() USE_TORCHVISION = False import torchvision from torch._dispatch.python import enable_python_dispatcher from torch.testing._internal.logging_tensor import LoggingTensorMode from torch.testing._internal.logging_tensor import LoggingTensor import pickle from torch._dispatch.python import enable_python_dispatcher from torch._dispatch.python import enable_python_dispatcher class TestSymbolicTracing(TestCase): import torch.library from torch.library import Library from torch._functorch.compilers import DebugInterpreter from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch._dynamo.source import LocalSource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_proxy_tensor.py
functional_call
def functional_call(*args, **kwargs): with stateless._reparametrize_module(foo, {}): return foo(*args, **kwargs) functional_call._orig_mod = foo gm_with_stack = make_fx(functional_call, record_module_stack=True)(torch.randn(4, 4)) found = False for node in gm_with_stack.graph.nodes: if "nn_module_stack" in node.meta: if len(node.meta["nn_module_stack"]) == 1: self.assertTrue("custom_tracer_preserving_nn_module_stack.<locals>.Foo" in str(node.meta["nn_module_stack"])) found = True elif len(node.meta["nn_module_stack"]) == 2: self.assertTrue("preserving_nn_module_stack.<locals>.Bar" in str(node.meta["nn_module_stack"])) found = True else: # there can be at most 2 level self.assertTrue(False) self.assertTrue(found) gm_without_stack = make_fx(functional_call)(torch.randn(4, 4)) for node in gm_without_stack.graph.nodes: self.assertTrue("nn_module_stack" not in node.meta)
from torch.testing._internal.common_utils import TestCase, run_tests import torch import torch._dynamo import unittest import warnings import operator from collections.abc import Iterable from torch.nn.utils import stateless from torch.testing._internal.common_device_type import instantiate_device_type_tests from torch.testing._internal.common_methods_invocations import op_db, skip, xfail, skipOps from torch._subclasses.fake_tensor import DynamicOutputShapeException, DataDependentOutputException, FakeTensorMode from torch._subclasses.functional_tensor import FunctionalTensor, FunctionalTensorMode from torch._decomp import decomposition_table from torch.fx.experimental.symbolic_shapes import ( eval_guards, bind_symbols, fx_placeholder_vals, fx_placeholder_targets, guard_int, GuardOnDataDependentSymNode ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.hop_db import hop_db from torch.testing._internal.common_device_type import ops import torch.testing._internal.optests as optests from torch._C import _disabled_torch_function_impl from torch.fx.experimental.proxy_tensor import make_fx, DecompositionInterpreter, get_isolated_graphmodule from torch.utils._pytree import tree_map from torch.fx.passes.runtime_assert import insert_deferred_runtime_asserts from torch import nn import torch._functorch.config import re import functools import itertools aten = torch.ops.aten HAS_CUDA = torch.cuda.is_available() USE_TORCHVISION = False import torchvision from torch._dispatch.python import enable_python_dispatcher from torch.testing._internal.logging_tensor import LoggingTensorMode from torch.testing._internal.logging_tensor import LoggingTensor import pickle from torch._dispatch.python import enable_python_dispatcher from torch._dispatch.python import enable_python_dispatcher import torch.library from torch.library import Library from torch._functorch.compilers import DebugInterpreter from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch._dynamo.source import LocalSource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_proxy_tensor.py
test_size_with_tensor
def test_size_with_tensor(self): def f(tensor): max_size = torch.tensor([800, 1216], dtype=torch.int64) batch_shape = [2] + list(tensor.shape[:-2]) + list(max_size) return tensor.new_empty(batch_shape) a = torch.randn(3, 800, 1199) self.assertRaisesRegex( RuntimeError, "data-dependent", lambda: make_fx(f, tracing_mode="symbolic")(a) )
def test_size_with_tensor(self): # I think I messed up writing this test case originally, I think # I'm supposed to hit an error case, but the code here works in both # eager and tracing def f(tensor): max_size = torch.tensor([800, 1216], dtype=torch.int64) batch_shape = [2] + list(tensor.shape[:-2]) + list(max_size) return tensor.new_empty(batch_shape) a = torch.randn(3, 800, 1199) f(a) make_fx(f, tracing_mode="symbolic")(a)
from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS, xfail_inherited_tests import torch import unittest import warnings import operator from collections.abc import Iterable from torch.testing._internal.common_device_type import instantiate_device_type_tests from torch.testing._internal.common_methods_invocations import DecorateInfo from torch.testing._internal.common_methods_invocations import op_db, wrapper_set_seed from torch._subclasses.fake_tensor import DynamicOutputShapeException, DataDependentOutputException from torch._decomp import decomposition_table from torch.fx.experimental.symbolic_shapes import ( sym_float, eval_guards, bind_symbols, fx_placeholder_vals, fx_placeholder_targets ) from torch.testing._internal.common_device_type import ops from torch._C import _disabled_torch_function_impl from torch.fx.experimental.proxy_tensor import make_fx, DecompositionInterpreter, get_isolated_graphmodule from torch.utils._pytree import tree_map from torch import nn import re import functools import itertools aten = torch.ops.aten import sympy # noqa: F401 skipIfNoSympy = unittest.skipIf(not HAS_SYMPY, "no sympy") HAS_CUDA = torch.cuda.is_available() USE_TORCHVISION = False import torchvision from torch.testing._internal.logging_tensor import LoggingTensorMode from torch.testing._internal.logging_tensor import LoggingTensor import pickle @skipIfNoSympy class TestSymbolicTracing(TestCase): import torch.library from torch.library import Library from torch._functorch.compilers import DebugInterpreter from torch._dynamo.source import LocalSource
from torch.testing._internal.common_utils import TestCase, run_tests import torch import torch._dynamo import unittest import warnings import operator from collections.abc import Iterable from torch.nn.utils import stateless from torch.testing._internal.common_device_type import instantiate_device_type_tests from torch.testing._internal.common_methods_invocations import op_db, skip, xfail, skipOps from torch._subclasses.fake_tensor import DynamicOutputShapeException, DataDependentOutputException, FakeTensorMode from torch._subclasses.functional_tensor import FunctionalTensor, FunctionalTensorMode from torch._decomp import decomposition_table from torch.fx.experimental.symbolic_shapes import ( eval_guards, bind_symbols, fx_placeholder_vals, fx_placeholder_targets, guard_int, GuardOnDataDependentSymNode ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.hop_db import hop_db from torch.testing._internal.common_device_type import ops import torch.testing._internal.optests as optests from torch._C import _disabled_torch_function_impl from torch.fx.experimental.proxy_tensor import make_fx, DecompositionInterpreter, get_isolated_graphmodule from torch.utils._pytree import tree_map from torch.fx.passes.runtime_assert import insert_deferred_runtime_asserts from torch import nn import torch._functorch.config import re import functools import itertools aten = torch.ops.aten HAS_CUDA = torch.cuda.is_available() USE_TORCHVISION = False import torchvision from torch._dispatch.python import enable_python_dispatcher from torch.testing._internal.logging_tensor import LoggingTensorMode from torch.testing._internal.logging_tensor import LoggingTensor import pickle from torch._dispatch.python import enable_python_dispatcher from torch._dispatch.python import enable_python_dispatcher class TestSymbolicTracing(TestCase): import torch.library from torch.library import Library from torch._functorch.compilers import DebugInterpreter from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch._dynamo.source import LocalSource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_proxy_tensor.py
test_make_fx_with_custom_tracer_preserving_nn_module_stack
def test_make_fx_with_custom_tracer_preserving_nn_module_stack(self): class Bar(torch.nn.Module): def __init__(self) -> None: super().__init__() def forward(self, x): return x + 1 class Foo(torch.nn.Module): def __init__(self) -> None: super().__init__() self.bar = Bar() def forward(self, x): return x + self.bar(x) gm = make_fx(Foo())(torch.randn(4, 4)) for node in gm.graph.nodes: self.assertTrue("nn_module_stack" not in node.meta) foo = Foo() def functional_call(*args, **kwargs): with stateless._reparametrize_module(foo, {}): return foo(*args, **kwargs) functional_call._orig_mod = foo gm_with_stack = make_fx(functional_call, record_module_stack=True)(torch.randn(4, 4)) found = False for node in gm_with_stack.graph.nodes: if "nn_module_stack" in node.meta: if len(node.meta["nn_module_stack"]) == 1: self.assertTrue("custom_tracer_preserving_nn_module_stack.<locals>.Foo" in str(node.meta["nn_module_stack"])) found = True elif len(node.meta["nn_module_stack"]) == 2: self.assertTrue("preserving_nn_module_stack.<locals>.Bar" in str(node.meta["nn_module_stack"])) found = True else: # there can be at most 2 level self.assertTrue(False) self.assertTrue(found) gm_without_stack = make_fx(functional_call)(torch.randn(4, 4)) for node in gm_without_stack.graph.nodes: self.assertTrue("nn_module_stack" not in node.meta)
from torch.testing._internal.common_utils import TestCase, run_tests import torch import torch._dynamo import unittest import warnings import operator from collections.abc import Iterable from torch.nn.utils import stateless from torch.testing._internal.common_device_type import instantiate_device_type_tests from torch.testing._internal.common_methods_invocations import op_db, skip, xfail, skipOps from torch._subclasses.fake_tensor import DynamicOutputShapeException, DataDependentOutputException, FakeTensorMode from torch._subclasses.functional_tensor import FunctionalTensor, FunctionalTensorMode from torch._decomp import decomposition_table from torch.fx.experimental.symbolic_shapes import ( eval_guards, bind_symbols, fx_placeholder_vals, fx_placeholder_targets, guard_int, GuardOnDataDependentSymNode ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.hop_db import hop_db from torch.testing._internal.common_device_type import ops import torch.testing._internal.optests as optests from torch._C import _disabled_torch_function_impl from torch.fx.experimental.proxy_tensor import make_fx, DecompositionInterpreter, get_isolated_graphmodule from torch.utils._pytree import tree_map from torch.fx.passes.runtime_assert import insert_deferred_runtime_asserts from torch import nn import torch._functorch.config import re import functools import itertools aten = torch.ops.aten HAS_CUDA = torch.cuda.is_available() USE_TORCHVISION = False import torchvision from torch._dispatch.python import enable_python_dispatcher from torch.testing._internal.logging_tensor import LoggingTensorMode from torch.testing._internal.logging_tensor import LoggingTensor import pickle from torch._dispatch.python import enable_python_dispatcher from torch._dispatch.python import enable_python_dispatcher class TestSymbolicTracing(TestCase): import torch.library from torch.library import Library from torch._functorch.compilers import DebugInterpreter from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch._dynamo.source import LocalSource
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_proxy_tensor.py
test_make_fx_symbolic_exhaustive_out
only_for = ("cpu") instantiate_device_type_tests(TestProxyTensorOpInfo, globals(), only_for=only_for) if __name__ == '__main__': run_tests()
def test_make_fx_symbolic_exhaustive_out(self, device, dtype, op): if not op.supports_out: self.skipTest("Op doesn't support out") _test_make_fx_helper(self, device, dtype, op, "symbolic", out=True)
from torch.testing._internal.common_utils import TestCase, run_tests import torch import torch._dynamo import unittest import warnings import operator from collections.abc import Iterable from torch.nn.utils import stateless from torch.testing._internal.common_device_type import instantiate_device_type_tests from torch.testing._internal.common_methods_invocations import op_db, skip, xfail, skipOps from torch._subclasses.fake_tensor import DynamicOutputShapeException, DataDependentOutputException, FakeTensorMode from torch._subclasses.functional_tensor import FunctionalTensor, FunctionalTensorMode from torch._decomp import decomposition_table from torch.fx.experimental.symbolic_shapes import ( eval_guards, bind_symbols, fx_placeholder_vals, fx_placeholder_targets, guard_int, GuardOnDataDependentSymNode ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.hop_db import hop_db from torch.testing._internal.common_device_type import ops import torch.testing._internal.optests as optests from torch._C import _disabled_torch_function_impl from torch.fx.experimental.proxy_tensor import make_fx, DecompositionInterpreter, get_isolated_graphmodule from torch.utils._pytree import tree_map from torch.fx.passes.runtime_assert import insert_deferred_runtime_asserts from torch import nn import torch._functorch.config import re import functools import itertools aten = torch.ops.aten HAS_CUDA = torch.cuda.is_available() USE_TORCHVISION = False import torchvision from torch._dispatch.python import enable_python_dispatcher from torch.testing._internal.logging_tensor import LoggingTensorMode from torch.testing._internal.logging_tensor import LoggingTensor import pickle from torch._dispatch.python import enable_python_dispatcher from torch._dispatch.python import enable_python_dispatcher import torch.library from torch.library import Library from torch._functorch.compilers import DebugInterpreter from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch._dynamo.source import LocalSource make_fx_failures = { # unknown xfail('allclose'), xfail('equal'), # empty skip('new_empty'), skip('empty_like'), skip('empty'), skip('empty_permuted'), # flaky skip('linalg.lstsq', 'grad_oriented'), skip('nn.functional.max_unpool1d', '', device_type='cpu'), skip('nn.functional.max_unpool2d', '', device_type='cpu'), skip('nn.functional.max_unpool3d', '', device_type='cpu'), skip('linalg.lstsq'), # flaky, probably just a precision issue # data-dependent control flow skip('item'), xfail('cov'), xfail('nn.functional.gaussian_nll_loss'), xfail('tensor_split'), xfail('corrcoef'), xfail('quantile'), xfail('nanquantile'), # Seems like it's creating a sparse tensor that isn't captured by tensor.is_sparse xfail('sparse.sampled_addmm'), xfail('sparse.mm', 'reduce'), # proxy tensor doesn't support sparse correctly right now skip('to_sparse'), # segfaults skip('block_diag'), # AssertionError: Tensor-likes are not close! skip('empty_strided', '', device_type='cpu'), } only_real_tensor_failures = { xfail('narrow'), } only_fake_tensor_failures = { xfail('narrow'), } fake_tensor_failures = { # ASAN failures due to divide by 0 skip('nn.functional.nll_loss'), } symbolic_tensor_failures = { xfail('combinations', ''), xfail('geqrf', ''), # aten.geqrf.default - couldn't find symbolic meta function/decomposition xfail('histogram', ''), # Could not run 'aten::histogram.bin_ct' with arguments from the 'Meta' backend. This c... xfail('histogramdd', ''), # aten._histogramdd_bin_edges.default - couldn't find symbolic meta function/decomposition xfail('nanquantile', ''), # Could not run 'aten::equal' with arguments from the 'Meta' backend. xfail('nn.functional.binary_cross_entropy', ''), # aten.new_empty.default - couldn't find symbolic meta function/decom... xfail('nn.functional.cross_entropy', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('nn.functional.ctc_loss'), # aten._ctc_loss.Tensor - couldn't find symbolic meta function/decomposition xfail('quantile', ''), # Could not run 'aten::equal' with arguments from the 'Meta' backend. xfail('unique_consecutive', ''), # aten.unique_consecutive.default - couldn't find symbolic meta function/decomposition xfail('max_pool2d_with_indices_backward', ''), # Expected a value of type 'List[int]' for argument 'kernel_size' but... # many complex operators incorrect striding, metadata xfail('fft.fft', ''), xfail('fft.hfft2', ''), xfail('fft.hfft', ''), xfail('fft.hfftn', ''), xfail('fft.ifft', ''), xfail('fft.ihfft2', ''), xfail('fft.ihfft', ''), xfail('fft.ihfftn', ''), xfail('fft.ihfft2', ''), xfail('fft.irfft2', ''), xfail('fft.irfft', ''), xfail('fft.irfftn', ''), xfail('fft.rfft2', ''), xfail('fft.rfft', ''), xfail('fft.rfftn', ''), xfail('stft', '') } symbolic_tensor_segfaults = { skip('nn.functional.batch_norm') # Segfault?? } inplace_symbolic_tensor_failures = { # bugs xfail('float_power', ''), # base given to float_power_ has dtype Float but the operation's result requires dtype Double } out_symbolic_tensor_failures = { # Cast error details: Unable to cast (...) to Tensor # # This happens because the test is set up to call the out variant using the `out` kwarg: # torch._some_op(arg1, arg2, out=(out1, out2, out3)) # # However, this only works on torch ops, not aten ops. For `_batch_norm_with_update`, # this fails because the op has no python bindings, so it doesn't support the `out` kwarg # way of calling its out variant. xfail('_batch_norm_with_update', ''), xfail('_native_batch_norm_legit', ''), xfail('angle', ''), xfail('argmax', ''), xfail('argmin', ''), xfail('fft.fft2', ''), xfail('fft.fftn', ''), xfail('fft.ifft2', ''), xfail('fft.ifftn', ''), xfail('gather', ''), xfail('linalg.pinv', ''), xfail('linalg.pinv', 'hermitian'), xfail('lu', ''), xfail('scatter_add', ''), xfail('scatter', ''), xfail('take_along_dim', ''), xfail('triangular_solve', ''), # SymIntArrayRef expected to contain only concrete xfail('ones', ''), xfail('randn', ''), xfail('zeros', ''), # RuntimeError: Cannot call numel() on tensor with symbolic sizes/strides xfail('index_reduce', 'prod'), xfail('index_reduce', 'mean'), xfail('index_reduce', 'amax'), xfail('index_reduce', 'amin'), } out_symbolic_tensor_segfaults = { skip('nanmean', ''), } filtered_hop_db = [op for op in hop_db if op.name != "auto_functionalize"] @unittest.skipIf(not torch._dynamo.is_dynamo_supported(), "Cond requires dynamo") class TestProxyTensorOpInfo(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_pruning_op.py
_generate_rowwise_mask
def _generate_rowwise_mask(self, embedding_rows): indicator = torch.from_numpy((np.random.random_sample(embedding_rows)).astype(np.float32)) threshold = np.random.random_sample() mask = torch.BoolTensor([True if val >= threshold else False for val in indicator]) return mask
def _generate_rowwise_mask(self, embedding_rows): indicator = torch.from_numpy((np.random.random_sample(embedding_rows)).astype(np.float32)) threshold = float(np.random.random_sample()) mask = torch.BoolTensor([True if val >= threshold else False for val in indicator]) return mask
import hypothesis.strategies as st from hypothesis import given import numpy as np import torch from torch.testing._internal.common_utils import TestCase import torch.testing._internal.hypothesis_utils as hu class PruningOpTest(TestCase):
import hypothesis.strategies as st from hypothesis import given import numpy as np import torch from torch.testing._internal.common_utils import TestCase, run_tests, skipIfTorchDynamo import torch.testing._internal.hypothesis_utils as hu class PruningOpTest(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_public_bindings.py
test_no_new_reexport_callables
def test_no_new_reexport_callables(self): """ This test aims to stop the introduction of new re-exported callables into torch whose names do not start with _. Such callables are made available as torch.XXX, which may not be desirable. """ reexported_callables = sorted( k for k, v in vars(torch).items() if callable(v) and not v.__module__.startswith("torch") ) self.assertTrue( all(k.startswith("_") for k in reexported_callables), reexported_callables )
import importlib import inspect import json import logging import os import pkgutil import unittest from typing import Callable import torch from torch._utils_internal import get_file_path_2 from torch.testing._internal.common_utils import ( IS_JETSON, IS_MACOS, IS_WINDOWS, run_tests, skipIfTorchDynamo, TestCase, ) log = logging.getLogger(__name__) class TestPublicBindings(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_proxy_tensor.py
_test_make_fx_helper
def _test_make_fx_helper(self, device, dtype, op, tracing_mode, inplace=False): def f(args, kwargs, extra_args, extra_kwargs): if extra_args: for i, t in extra_args: args[i] = t.size() if extra_kwargs: for k, t in extra_kwargs.items(): kwargs[k] = t.size() fn = _get_safe_inplace(op.get_inplace()) if inplace else op.op return fn(*args, **kwargs) sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False) new_f = None # Limit ourselves to first 100 inputs so symbolic tracing tests don't take too long for sample_input in itertools.islice(sample_inputs_itr, 100): if inplace and sample_input.broadcasts_input: continue args = [sample_input.input] + list(sample_input.args) kwargs = sample_input.kwargs # If any argument is a torch.Size(), maybe get dynamic shapes for it by: # - Create a temporary Tensor whose size is the torch.Size() we want. Note that # we use an expanded Tensor as we cannot pass "meta" Tensors to make_fx. # - Pass it to make_fx such that it is is converted to a proxy Tensor # - Unpack the size in the wrapper to get a torch.Size with dynamic shapes (in # symbolic mode, a no-op otherwise) extra_args = [] extra_kwargs = {} for i, arg in enumerate(args): if isinstance(arg, torch.Size): extra_args.append((i, torch.empty(arg, device="cpu"))) for key, value in kwargs.items(): if isinstance(value, torch.Size): extra_kwargs[key] = torch.empty(value, device="cpu") try: new_f = make_fx(f, tracing_mode=tracing_mode)(args, kwargs, extra_args, extra_kwargs) except DynamicOutputShapeException as e: self.skipTest("Dynamic output shape operation in trace") for arg in args: if isinstance(arg, torch.Tensor) and arg.dtype == torch.float: arg.uniform_(0, 1) try: old_out = f(args, kwargs, extra_args, extra_kwargs) except Exception: continue new_out = wrapper_set_seed(new_f, args, kwargs, extra_args, extra_kwargs) self.assertEqual(new_out, old_out) class TestProxyTensorOpInfo(TestCase): @ops(op_db, allowed_dtypes=(torch.float,)) @skipOps('TestProxyTensorOpInfo', 'test_make_fx_exhaustive', make_fx_failures) def test_make_fx_exhaustive(self, device, dtype, op): _test_make_fx_helper(self, device, dtype, op, "real") @ops(op_db, allowed_dtypes=(torch.float,)) @skipOps('TestProxyTensorOpInfo', 'test_make_fx_fake_exhaustive', make_fx_failures.union(fake_tensor_failures)) def test_make_fx_fake_exhaustive(self, device, dtype, op): _test_make_fx_helper(self, device, dtype, op, "fake") @skipIfNoSympy @ops(op_db, allowed_dtypes=(torch.float,)) @skipOps('TestProxyTensorOpInfo', 'test_make_fx_symbolic_exhaustive', make_fx_failures | fake_tensor_failures | symbolic_tensor_failures | outplace_symbolic_tensor_failures) def test_make_fx_symbolic_exhaustive(self, device, dtype, op): _test_make_fx_helper(self, device, dtype, op, "symbolic") @skipIfNoSympy @ops(op_db, allowed_dtypes=(torch.float,)) @skipOps('TestProxyTensorOpInfo', 'test_make_fx_symbolic_exhaustive_inplace', make_fx_failures | fake_tensor_failures | symbolic_tensor_failures | inplace_symbolic_tensor_failures) def test_make_fx_symbolic_exhaustive_inplace(self, device, dtype, op): if not op.get_inplace(): self.skipTest("No inplace variable for this op") _test_make_fx_helper(self, device, dtype, op, "symbolic", inplace=True) only_for = ("cpu") instantiate_device_type_tests(TestProxyTensorOpInfo, globals(), only_for=only_for) if __name__ == '__main__': run_tests()
def _test_make_fx_helper(self, device, dtype, op, tracing_mode, inplace=False, out=False): fn = _get_safe_inplace(op.get_inplace()) if inplace else op.op sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False) # Limit ourselves to first 100 inputs so symbolic tracing tests don't take too long count = 100 if out: count = 5 for sample_input in itertools.islice(sample_inputs_itr, count): if inplace and sample_input.broadcasts_input: continue args = [sample_input.input] + list(sample_input.args) kwargs = sample_input.kwargs if out: expected = fn(*args, **kwargs) kwargs['out'] = expected try: optests.make_fx_check(fn, args, kwargs, tracing_mode, self.assertEqual, randomize_data=True) except DynamicOutputShapeException: self.skipTest("Dynamic output shape operation in trace")
from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS, xfail_inherited_tests import torch import unittest import warnings import operator from collections.abc import Iterable from torch.testing._internal.common_device_type import instantiate_device_type_tests from torch.testing._internal.common_methods_invocations import DecorateInfo from torch.testing._internal.common_methods_invocations import op_db, wrapper_set_seed from torch._subclasses.fake_tensor import DynamicOutputShapeException, DataDependentOutputException from torch._decomp import decomposition_table from torch.fx.experimental.symbolic_shapes import ( sym_float, eval_guards, bind_symbols, fx_placeholder_vals, fx_placeholder_targets ) from torch.testing._internal.common_device_type import ops from torch._C import _disabled_torch_function_impl from torch.fx.experimental.proxy_tensor import make_fx, DecompositionInterpreter, get_isolated_graphmodule from torch.utils._pytree import tree_map from torch import nn import re import functools import itertools aten = torch.ops.aten import sympy # noqa: F401 skipIfNoSympy = unittest.skipIf(not HAS_SYMPY, "no sympy") HAS_CUDA = torch.cuda.is_available() USE_TORCHVISION = False import torchvision from torch.testing._internal.logging_tensor import LoggingTensorMode from torch.testing._internal.logging_tensor import LoggingTensor import pickle import torch.library from torch.library import Library from torch._functorch.compilers import DebugInterpreter from torch._dynamo.source import LocalSource make_fx_failures = { # unknown xfail('allclose'), xfail('equal'), # empty skip('new_empty'), skip('empty_like'), skip('empty'), # flaky skip('linalg.lstsq', 'grad_oriented'), skip('nn.functional.max_unpool1d', '', device_type='cpu'), skip('nn.functional.max_unpool2d', '', device_type='cpu'), skip('nn.functional.max_unpool3d', '', device_type='cpu'), skip('linalg.lstsq'), # flaky, probably just a precision issue # data-dependent control flow xfail('cov'), xfail('istft'), xfail('nn.functional.gaussian_nll_loss'), xfail('tensor_split'), xfail('corrcoef'), xfail('quantile'), xfail('nanquantile'), xfail('narrow'), # Seems like it's creating a sparse tensor that isn't captured by tensor.is_sparse xfail('sparse.sampled_addmm'), xfail('sparse.mm', 'reduce'), # proxy tensor doesn't support sparse correctly right now skip('to_sparse'), # segfaults skip('block_diag'), } fake_tensor_failures = { # FakeTensor fallback doesn't work xfail('_segment_reduce', 'lengths'), xfail('multinomial'), xfail('cholesky'), xfail('cholesky_inverse'), # cannot do these as they rely on tensor data xfail('repeat_interleave'), # ASAN failures due to divide by 0 skip('nn.functional.nll_loss'), } symbolic_tensor_failures = { # Needs complex-value support xfail('polar'), xfail('linalg.eig'), xfail('linalg.eigvals'), skip('masked.logsumexp', ''), # Tensors of type TensorImpl do not have numel xfail('masked.cumprod', ''), # aten._to_copy.default - couldn't find symbolic meta function/decomposition xfail('addmv', ''), # aten.addmv.default - couldn't find symbolic meta function/decomposition xfail('aminmax', ''), # aten.aminmax.default - couldn't find symbolic meta function/decomposition xfail('argwhere', ''), # aten.nonzero.default - couldn't find symbolic meta function/decomposition xfail('baddbmm', ''), # aten.baddbmm.default - couldn't find symbolic meta function/decomposition xfail('cdist', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('cholesky_solve', ''), # Could not run 'aten::_cholesky_solve_helper' with arguments from the 'Meta' back... xfail('column_stack', ''), # Tensors of type TensorImpl do not have numel xfail('combinations', ''), xfail('count_nonzero', ''), # Could not run 'aten::count_nonzero.dim_IntList' with arguments from the 'Meta' ba... xfail('cross', ''), # aten.linalg_cross.default - couldn't find symbolic meta function/decomposition xfail('cummax', ''), # aten.cummax.default - couldn't find symbolic meta function/decomposition xfail('cummin', ''), # aten.cummin.default - couldn't find symbolic meta function/decomposition xfail('cumprod', ''), # aten.cumprod.default - couldn't find symbolic meta function/decomposition xfail('cumulative_trapezoid', ''), # aten.slice.Tensor - couldn't find symbolic meta function/decomposition xfail('diff', ''), # aten.empty_like.default - couldn't find symbolic meta function/decomposition xfail('dsplit', ''), # aten.slice.Tensor - couldn't find symbolic meta function/decomposition xfail('fft.fft2', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('fft.fft', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('fft.fftn', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('fft.fftshift', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('fft.hfft2', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('fft.hfft', ''), # aten._to_copy.default - couldn't find symbolic meta function/decomposition xfail('fft.hfftn', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('fft.ifft2', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('fft.ifft', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('fft.ifftn', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('fft.ifftshift', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('fft.ihfft2', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('fft.ihfft', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('fft.ihfftn', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('fft.irfft2', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('fft.irfft', ''), # aten._to_copy.default - couldn't find symbolic meta function/decomposition xfail('fft.irfftn', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('fft.rfft2', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('fft.rfft', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('fft.rfftn', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('unflatten', ''), # RuntimeError: Trying to call aten.size on a tensor with symbolic shapes... xfail('frexp', ''), # aten.frexp.Tensor - couldn't find symbolic meta function/decomposition xfail('geqrf', ''), # aten.geqrf.default - couldn't find symbolic meta function/decomposition xfail('gradient', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('histc', ''), # Could not run 'aten::histc' with arguments from the 'Meta' backend. This could be because... xfail('histogram', ''), # Could not run 'aten::histogram.bin_ct' with arguments from the 'Meta' backend. This c... xfail('histogramdd', ''), # aten._histogramdd_bin_edges.default - couldn't find symbolic meta function/decomposition xfail('hsplit', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('index_reduce', ''), # Float xfail('inner', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('isin', ''), # aten.isin.Tensor_Tensor - couldn't find symbolic meta function/decomposition xfail('kron', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('kthvalue', ''), # aten.kthvalue.default - couldn't find symbolic meta function/decomposition xfail('linalg.cond', ''), # Tensors of type TensorImpl do not have numel xfail('linalg.cross', ''), # aten.linalg_cross.default - couldn't find symbolic meta function/decomposition xfail('linalg.eigh', ''), # aten._linalg_eigh.default - couldn't find symbolic meta function/decomposition xfail('linalg.eigvalsh', ''), # aten._linalg_eigh.default - couldn't find symbolic meta function/decomposition xfail('linalg.householder_product', ''), # aten.linalg_householder_product.default - couldn't find symbolic meta funct... xfail('linalg.ldl_factor', ''), # aten.linalg_ldl_factor_ex.default - couldn't find symbolic meta function/decomposition xfail('linalg.ldl_factor_ex', ''), # aten.linalg_ldl_factor_ex.default - couldn't find symbolic meta function/decompos... xfail('linalg.ldl_solve', ''), # aten.linalg_ldl_solve.default - couldn't find symbolic meta function/decomposition xfail('linalg.lu', ''), # aten.linalg_lu.default - couldn't find symbolic meta function/decomposition xfail('linalg.lu_factor', ''), # aten.linalg_lu_factor_ex.default - couldn't find symbolic meta function/decomposition xfail('linalg.lu_factor_ex', ''), # aten.linalg_lu_factor_ex.default - couldn't find symbolic meta function/decomposition xfail('linalg.lu_solve', ''), # aten.linalg_lu_solve.default - couldn't find symbolic meta function/decomposition xfail('linalg.matrix_power'), # RuntimeError: Trying to call aten.size on a tensor with symbolic shape xfail('linalg.matrix_rank', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('linalg.matrix_rank', 'hermitian'), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('linalg.multi_dot', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('linalg.pinv', ''), # aten.linalg_pinv.atol_rtol_tensor - couldn't find symbolic meta function/decomposition xfail('linalg.pinv', 'singular'), # aten.linalg_cholesky_ex.default - couldn't find symbolic meta function/decomposition xfail('linalg.pinv', 'hermitian'), # aten.linalg_pinv.atol_rtol_tensor - couldn't find symbolic meta function/decompo... xfail('linalg.qr', ''), # aten.linalg_qr.default - couldn't find symbolic meta function/decomposition xfail('linalg.slogdet', ''), # aten._linalg_slogdet.default - couldn't find symbolic meta function/decomposition xfail('linalg.solve', ''), # aten._linalg_solve_ex.default - couldn't find symbolic meta function/decomposition xfail('linalg.solve_ex', ''), # aten._linalg_solve_ex.default - couldn't find symbolic meta function/decomposition xfail('linalg.solve_triangular', ''), # aten.linalg_solve_triangular.default - couldn't find symbolic meta function/de... xfail('linalg.tensorinv', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('linalg.tensorsolve', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('linalg.vander', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('logaddexp2', ''), # aten.logaddexp2.default - couldn't find symbolic meta function/decomposition xfail('logcumsumexp', ''), # aten.logcumsumexp.default - couldn't find symbolic meta function/decomposition xfail('logdet', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('lu', ''), # aten.linalg_lu_factor_ex.default - couldn't find symbolic meta function/decomposition xfail('lu_solve', ''), # aten.linalg_lu_solve.default - couldn't find symbolic meta function/decomposition xfail('lu_unpack', ''), # aten.lu_unpack.default - couldn't find symbolic meta function/decomposition xfail('masked_select', ''), # aten.masked_select.default - couldn't find symbolic meta function/decomposition xfail('matrix_exp', ''), # aten.linalg_matrix_exp.default - couldn't find symbolic meta function/decomposition xfail('median', ''), # Could not run 'aten::median' with arguments from the 'Meta' backend. This could be becau... xfail('min', 'reduction_with_dim'), # aten.min.dim - couldn't find symbolic meta function/decomposition xfail('mode', ''), # aten.mode.default - couldn't find symbolic meta function/decomposition xfail('nanquantile', ''), # Could not run 'aten::equal' with arguments from the 'Meta' backend. xfail('narrow', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('max_pool2d_with_indices_backward', ''), # (symint math failure) Given input size: (s0xs1x2). Calculated ... xfail('nn.functional.adaptive_max_pool1d', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('nn.functional.adaptive_max_pool2d', ''), # aten.adaptive_max_pool2d.default - couldn't find symbolic meta funct... xfail('nn.functional.adaptive_max_pool3d', ''), # argument 'output_size' (position 2) must be tupl... xfail('nn.functional.avg_pool3d', ''), # aten.avg_pool3d.default - couldn't find symbolic meta function/decomposition xfail('nn.functional.bilinear', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('nn.functional.binary_cross_entropy', ''), # aten.new_empty.default - couldn't find symbolic meta function/decom... xfail('nn.functional.cosine_similarity', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('nn.functional.cross_entropy', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('nn.functional.ctc_loss'), # aten._ctc_loss.Tensor - couldn't find symbolic meta function/decomposition xfail('nn.functional.embedding_bag', ''), # aten._embedding_bag_forward_only.default - couldn't find symbolic meta fun... xfail('nn.functional.fractional_max_pool2d', ''), # argument 'size' must be tuple of ints, but found element of t... xfail('nn.functional.fractional_max_pool3d', ''), # argument 'size' must be tuple of ints, but found element of t... xfail('nn.functional.grid_sample', ''), # aten.grid_sampler_2d.default - couldn't find symbolic meta function/decompos... xfail('nn.functional.interpolate', 'area'), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('nn.functional.interpolate', 'linear'), # aten.upsample_linear1d.vec - couldn't find symbolic meta function/dec... xfail('nn.functional.interpolate', 'trilinear'), # aten.upsample_trilinear3d.vec - couldn't find symbolic meta functi... xfail('nn.functional.max_pool1d', ''), # Trying to call aten.size on a tensor with symbolic shapes. xfail('nn.functional.max_pool3d', ''), # aten.max_pool3d_with_indices.default - couldn't find symbolic meta function/d... xfail('nn.functional.max_unpool1d', 'grad'), # aten.max_unpool2d.default - couldn't find symbolic meta function/decom... xfail('nn.functional.max_unpool2d', 'grad'), # aten.max_unpool2d.default - couldn't find symbolic meta function/decom... xfail('nn.functional.max_unpool3d', 'grad'), # aten.max_unpool3d.default - couldn't find symbolic meta function/decom... xfail('nn.functional.multi_margin_loss', ''), # Could not run 'aten::multi_margin_loss' with arguments from the... xfail('nn.functional.multilabel_margin_loss', ''), # Could not run 'aten::multilabel_margin_loss_forward' with ... xfail('nn.functional.pad', 'reflect'), # aten.reflection_pad1d.default - couldn't find symbolic meta function/decompo... xfail('nn.functional.pad', 'replicate'), # aten.replication_pad1d.default - couldn't find symbolic meta function/deco... xfail('nn.functional.pdist', ''), # Could not run 'aten::_pdist_forward' with arguments from the 'Meta' backend... xfail('nn.functional.pixel_unshuffle', ''), # aten.pixel_unshuffle.default - couldn't find symbolic meta function/deco... xfail('nn.functional.smooth_l1_loss', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('nonzero', ''), # aten.nonzero.default - couldn't find symbolic meta function/decomposition xfail('normal', ''), # aten.normal.Tensor_Tensor - couldn't find symbolic meta function/decomposition xfail('normal', 'number_mean'), # aten.normal.float_Tensor - couldn't find symbolic meta function/decomposition xfail('ormqr', ''), # aten.ormqr.default - couldn't find symbolic meta function/decomposition xfail('pca_lowrank', ''), # aten.mm.default - couldn't find symbolic meta function/decomposition xfail('pinverse', ''), # aten.linalg_pinv.atol_rtol_tensor - couldn't find symbolic meta function/decomposition xfail('polygamma', 'polygamma_n_0'), # aten.polygamma.default - couldn't find symbolic meta function/decomposition xfail('polygamma', 'polygamma_n_1'), # aten.polygamma.default - couldn't find symbolic meta function/decomposition xfail('polygamma', 'polygamma_n_2'), # aten.polygamma.default - couldn't find symbolic meta function/decomposition xfail('polygamma', 'polygamma_n_3'), # aten.polygamma.default - couldn't find symbolic meta function/decomposition xfail('polygamma', 'polygamma_n_4'), # aten.polygamma.default - couldn't find symbolic meta function/decomposition xfail('quantile', ''), # Could not run 'aten::equal' with arguments from the 'Meta' backend. xfail('qr', ''), # aten.linalg_qr.default - couldn't find symbolic meta function/decomposition xfail('renorm', ''), # aten.renorm.default - couldn't find symbolic meta function/decomposition xfail('repeat_interleave', ''), # Cannot call sizes() on tensor with symbolic sizes/strides xfail('resize_', ''), # aten.clone.default - couldn't find symbolic meta function/decomposition xfail('resize_as_', ''), # aten.clone.default - couldn't find symbolic meta function/decomposition xfail('roll', ''), # Tensors of type TensorImpl do not have numel xfail('searchsorted', ''), # Could not run 'aten::searchsorted.Tensor' with arguments from the 'Meta' backend. ... xfail('_segment_reduce', 'offsets'), # aten.segment_reduce.default - couldn't find symbolic meta function/decomposition xfail('special.airy_ai', ''), # aten.special_airy_ai.default - couldn't find symbolic meta function/decomposition xfail('special.bessel_y0', ''), # aten.special_bessel_y0.default - couldn't find symbolic meta function/decomposition xfail('special.bessel_y1', ''), # aten.special_bessel_y1.default - couldn't find symbolic meta function/decomposition xfail('special.chebyshev_polynomial_t', ''), # aten.special_chebyshev_polynomial_t.default - couldn't find symbolic me... xfail('special.chebyshev_polynomial_u', ''), # aten.special_chebyshev_polynomial_u.default - couldn't find symbolic me... xfail('special.hermite_polynomial_h', ''), # aten.special_hermite_polynomial_h.default - couldn't find symbolic meta f... xfail('special.hermite_polynomial_he', ''), # aten.special_hermite_polynomial_he.default - couldn't find symbolic meta... xfail('special.laguerre_polynomial_l', ''), # aten.special_laguerre_polynomial_l.default - couldn't find symbolic meta... xfail('special.modified_bessel_i0', ''), # aten.special_modified_bessel_i0.default - couldn't find symbolic meta funct... xfail('special.modified_bessel_i1', ''), # aten.special_modified_bessel_i1.default - couldn't find symbolic meta funct... xfail('special.modified_bessel_k0', ''), # aten.special_modified_bessel_k0.default - couldn't find symbolic meta funct... xfail('special.modified_bessel_k1', ''), # aten.special_modified_bessel_k1.default - couldn't find symbolic meta funct... xfail('special.polygamma', 'special_polygamma_n_0'), # aten.polygamma.default - couldn't find symbolic meta function/... xfail('special.scaled_modified_bessel_k0', ''), # aten.special_scaled_modified_bessel_k0.default - couldn't find symbo... xfail('special.scaled_modified_bessel_k1', ''), # aten.special_scaled_modified_bessel_k1.default - couldn't find symbo... xfail('stft', ''), # argument 'size' must be tuple of ints, but found element of type torch._C.SymIntNode at... xfail('sum_to_size', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('svd_lowrank', ''), # aten.mm.default - couldn't find symbolic meta function/decomposition xfail('take_along_dim', ''), # dtype of indices should be Long but got Float xfail('take', ''), # aten.take.default - couldn't find symbolic meta function/decomposition xfail('tensordot', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('trapz', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('trapezoid', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('triangular_solve', ''), # aten.triangular_solve.default - couldn't find symbolic meta function/decomposition xfail('vsplit', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('unique_consecutive', ''), # aten.unique_consecutive.default - couldn't find symbolic meta function/decomposition xfail('unique', ''), # aten._unique2.default - couldn't find symbolic meta function/decomposition } symbolic_tensor_segfaults = { skip('nn.functional.batch_norm') # Segfault?? } outplace_symbolic_tensor_failures = { xfail('i0', ''), # aten.i0.default - couldn't find symbolic meta function/decomposition xfail('masked_scatter', ''), # aten.masked_scatter.default - couldn't find symbolic meta function/decomposition xfail('nn.functional.rrelu', ''), # aten.empty_like.default - couldn't find symbolic meta function/decomposition } inplace_symbolic_tensor_failures = { # bugs xfail('float_power', ''), # base given to float_power_ has dtype Float but the operation's result requires dtype Double # decomp not implemented xfail('unique', ''), # in-place has a different signature than out-of-place xfail('uniform', ''), # Views xfail('t', ''), xfail('transpose', ''), }
from torch.testing._internal.common_utils import TestCase, run_tests import torch import torch._dynamo import unittest import warnings import operator from collections.abc import Iterable from torch.nn.utils import stateless from torch.testing._internal.common_device_type import instantiate_device_type_tests from torch.testing._internal.common_methods_invocations import op_db, skip, xfail, skipOps from torch._subclasses.fake_tensor import DynamicOutputShapeException, DataDependentOutputException, FakeTensorMode from torch._subclasses.functional_tensor import FunctionalTensor, FunctionalTensorMode from torch._decomp import decomposition_table from torch.fx.experimental.symbolic_shapes import ( eval_guards, bind_symbols, fx_placeholder_vals, fx_placeholder_targets, guard_int, GuardOnDataDependentSymNode ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.hop_db import hop_db from torch.testing._internal.common_device_type import ops import torch.testing._internal.optests as optests from torch._C import _disabled_torch_function_impl from torch.fx.experimental.proxy_tensor import make_fx, DecompositionInterpreter, get_isolated_graphmodule from torch.utils._pytree import tree_map from torch.fx.passes.runtime_assert import insert_deferred_runtime_asserts from torch import nn import torch._functorch.config import re import functools import itertools aten = torch.ops.aten HAS_CUDA = torch.cuda.is_available() USE_TORCHVISION = False import torchvision from torch._dispatch.python import enable_python_dispatcher from torch.testing._internal.logging_tensor import LoggingTensorMode from torch.testing._internal.logging_tensor import LoggingTensor import pickle from torch._dispatch.python import enable_python_dispatcher from torch._dispatch.python import enable_python_dispatcher import torch.library from torch.library import Library from torch._functorch.compilers import DebugInterpreter from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch._dynamo.source import LocalSource make_fx_failures = { # unknown xfail('allclose'), xfail('equal'), # empty skip('new_empty'), skip('empty_like'), skip('empty'), skip('empty_permuted'), # flaky skip('linalg.lstsq', 'grad_oriented'), skip('nn.functional.max_unpool1d', '', device_type='cpu'), skip('nn.functional.max_unpool2d', '', device_type='cpu'), skip('nn.functional.max_unpool3d', '', device_type='cpu'), skip('linalg.lstsq'), # flaky, probably just a precision issue # data-dependent control flow skip('item'), xfail('cov'), xfail('nn.functional.gaussian_nll_loss'), xfail('tensor_split'), xfail('corrcoef'), xfail('quantile'), xfail('nanquantile'), # Seems like it's creating a sparse tensor that isn't captured by tensor.is_sparse xfail('sparse.sampled_addmm'), xfail('sparse.mm', 'reduce'), # proxy tensor doesn't support sparse correctly right now skip('to_sparse'), # segfaults skip('block_diag'), # AssertionError: Tensor-likes are not close! skip('empty_strided', '', device_type='cpu'), } only_real_tensor_failures = { xfail('narrow'), } only_fake_tensor_failures = { xfail('narrow'), } fake_tensor_failures = { # ASAN failures due to divide by 0 skip('nn.functional.nll_loss'), } symbolic_tensor_failures = { xfail('combinations', ''), xfail('geqrf', ''), # aten.geqrf.default - couldn't find symbolic meta function/decomposition xfail('histogram', ''), # Could not run 'aten::histogram.bin_ct' with arguments from the 'Meta' backend. This c... xfail('histogramdd', ''), # aten._histogramdd_bin_edges.default - couldn't find symbolic meta function/decomposition xfail('nanquantile', ''), # Could not run 'aten::equal' with arguments from the 'Meta' backend. xfail('nn.functional.binary_cross_entropy', ''), # aten.new_empty.default - couldn't find symbolic meta function/decom... xfail('nn.functional.cross_entropy', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('nn.functional.ctc_loss'), # aten._ctc_loss.Tensor - couldn't find symbolic meta function/decomposition xfail('quantile', ''), # Could not run 'aten::equal' with arguments from the 'Meta' backend. xfail('unique_consecutive', ''), # aten.unique_consecutive.default - couldn't find symbolic meta function/decomposition xfail('max_pool2d_with_indices_backward', ''), # Expected a value of type 'List[int]' for argument 'kernel_size' but... # many complex operators incorrect striding, metadata xfail('fft.fft', ''), xfail('fft.hfft2', ''), xfail('fft.hfft', ''), xfail('fft.hfftn', ''), xfail('fft.ifft', ''), xfail('fft.ihfft2', ''), xfail('fft.ihfft', ''), xfail('fft.ihfftn', ''), xfail('fft.ihfft2', ''), xfail('fft.irfft2', ''), xfail('fft.irfft', ''), xfail('fft.irfftn', ''), xfail('fft.rfft2', ''), xfail('fft.rfft', ''), xfail('fft.rfftn', ''), xfail('stft', '') } symbolic_tensor_segfaults = { skip('nn.functional.batch_norm') # Segfault?? } inplace_symbolic_tensor_failures = { # bugs xfail('float_power', ''), # base given to float_power_ has dtype Float but the operation's result requires dtype Double } out_symbolic_tensor_failures = { # Cast error details: Unable to cast (...) to Tensor # # This happens because the test is set up to call the out variant using the `out` kwarg: # torch._some_op(arg1, arg2, out=(out1, out2, out3)) # # However, this only works on torch ops, not aten ops. For `_batch_norm_with_update`, # this fails because the op has no python bindings, so it doesn't support the `out` kwarg # way of calling its out variant. xfail('_batch_norm_with_update', ''), xfail('_native_batch_norm_legit', ''), xfail('angle', ''), xfail('argmax', ''), xfail('argmin', ''), xfail('fft.fft2', ''), xfail('fft.fftn', ''), xfail('fft.ifft2', ''), xfail('fft.ifftn', ''), xfail('gather', ''), xfail('linalg.pinv', ''), xfail('linalg.pinv', 'hermitian'), xfail('lu', ''), xfail('scatter_add', ''), xfail('scatter', ''), xfail('take_along_dim', ''), xfail('triangular_solve', ''), # SymIntArrayRef expected to contain only concrete xfail('ones', ''), xfail('randn', ''), xfail('zeros', ''), # RuntimeError: Cannot call numel() on tensor with symbolic sizes/strides xfail('index_reduce', 'prod'), xfail('index_reduce', 'mean'), xfail('index_reduce', 'amax'), xfail('index_reduce', 'amin'), } out_symbolic_tensor_segfaults = { skip('nanmean', ''), }
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_proxy_tensor.py
skipIfNameMatches
def skipIfNameMatches(pattern): """ Decorator to skip a test if its name matches the given pattern. """ def decorator(test_func): def wrapper(*args, **kwargs): if re.match(pattern, test_func.__name__): raise unittest.SkipTest(f"Test '{test_func.__name__}' skipped because its name matches the pattern '{pattern}'") return test_func(*args, **kwargs) return wrapper return decorator # Auto functionalize shouldn't work with make_fx directly filtered_hop_db = [op for op in hop_db if op.name != "auto_functionalize"] @unittest.skipIf(not torch._dynamo.is_dynamo_supported(), "Cond requires dynamo") class TestProxyTensorOpInfo(TestCase): @ops(op_db + filtered_hop_db + custom_op_db, allowed_dtypes=(torch.float,)) @skipOps('TestProxyTensorOpInfo', 'test_make_fx_exhaustive', make_fx_failures.union(only_real_tensor_failures)) def test_make_fx_exhaustive(self, device, dtype, op): _test_make_fx_helper(self, device, dtype, op, "real") @ops(op_db + filtered_hop_db + custom_op_db, allowed_dtypes=(torch.float,)) @skipOps('TestProxyTensorOpInfo', 'test_make_fx_fake_exhaustive', make_fx_failures.union(fake_tensor_failures, only_fake_tensor_failures)) def test_make_fx_fake_exhaustive(self, device, dtype, op): _test_make_fx_helper(self, device, dtype, op, "fake") @ops(op_db + filtered_hop_db + custom_op_db, allowed_dtypes=(torch.float,)) @skipOps('TestProxyTensorOpInfo', 'test_make_fx_symbolic_exhaustive', make_fx_failures | fake_tensor_failures | symbolic_tensor_failures) def test_make_fx_symbolic_exhaustive(self, device, dtype, op): _test_make_fx_helper(self, device, dtype, op, "symbolic") @ops(op_db + custom_op_db, allowed_dtypes=(torch.float,)) @skipOps('TestProxyTensorOpInfo', 'test_make_fx_symbolic_exhaustive_inplace', make_fx_failures | fake_tensor_failures | symbolic_tensor_failures | inplace_symbolic_tensor_failures) def test_make_fx_symbolic_exhaustive_inplace(self, device, dtype, op): if not op.get_inplace(): self.skipTest("No inplace variable for this op") _test_make_fx_helper(self, device, dtype, op, "symbolic", inplace=True) @ops(op_db + custom_op_db, allowed_dtypes=(torch.float,)) @skipOps('TestProxyTensorOpInfo', 'test_make_fx_symbolic_exhaustive_out', make_fx_failures | fake_tensor_failures | symbolic_tensor_failures | out_symbolic_tensor_failures) def test_make_fx_symbolic_exhaustive_out(self, device, dtype, op): if not op.supports_out: self.skipTest("Op doesn't support out") _test_make_fx_helper(self, device, dtype, op, "symbolic", out=True) only_for = ("cpu") instantiate_device_type_tests(TestProxyTensorOpInfo, globals(), only_for=only_for) if __name__ == '__main__': run_tests()
from torch.testing._internal.common_utils import TestCase, run_tests import torch import torch._dynamo import unittest import warnings import operator from collections.abc import Iterable from torch.nn.utils import stateless from torch.testing._internal.common_device_type import instantiate_device_type_tests from torch.testing._internal.common_methods_invocations import op_db, skip, xfail, skipOps from torch._subclasses.fake_tensor import DynamicOutputShapeException, DataDependentOutputException, FakeTensorMode from torch._subclasses.functional_tensor import FunctionalTensor, FunctionalTensorMode from torch._decomp import decomposition_table from torch.fx.experimental.symbolic_shapes import ( eval_guards, bind_symbols, fx_placeholder_vals, fx_placeholder_targets, guard_int, GuardOnDataDependentSymNode ) from torch.testing._internal.custom_op_db import custom_op_db from torch.testing._internal.hop_db import hop_db from torch.testing._internal.common_device_type import ops import torch.testing._internal.optests as optests from torch._C import _disabled_torch_function_impl from torch.fx.experimental.proxy_tensor import make_fx, DecompositionInterpreter, get_isolated_graphmodule from torch.utils._pytree import tree_map from torch.fx.passes.runtime_assert import insert_deferred_runtime_asserts from torch import nn import torch._functorch.config import re import functools import itertools aten = torch.ops.aten HAS_CUDA = torch.cuda.is_available() USE_TORCHVISION = False import torchvision from torch._dispatch.python import enable_python_dispatcher from torch.testing._internal.logging_tensor import LoggingTensorMode from torch.testing._internal.logging_tensor import LoggingTensor import pickle from torch._dispatch.python import enable_python_dispatcher from torch._dispatch.python import enable_python_dispatcher import torch.library from torch.library import Library from torch._functorch.compilers import DebugInterpreter from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch.fx.experimental.symbolic_shapes import ShapeEnv from torch._dynamo.source import LocalSource make_fx_failures = { # unknown xfail('allclose'), xfail('equal'), # empty skip('new_empty'), skip('empty_like'), skip('empty'), skip('empty_permuted'), # flaky skip('linalg.lstsq', 'grad_oriented'), skip('nn.functional.max_unpool1d', '', device_type='cpu'), skip('nn.functional.max_unpool2d', '', device_type='cpu'), skip('nn.functional.max_unpool3d', '', device_type='cpu'), skip('linalg.lstsq'), # flaky, probably just a precision issue # data-dependent control flow skip('item'), xfail('cov'), xfail('nn.functional.gaussian_nll_loss'), xfail('tensor_split'), xfail('corrcoef'), xfail('quantile'), xfail('nanquantile'), # Seems like it's creating a sparse tensor that isn't captured by tensor.is_sparse xfail('sparse.sampled_addmm'), xfail('sparse.mm', 'reduce'), # proxy tensor doesn't support sparse correctly right now skip('to_sparse'), # segfaults skip('block_diag'), # AssertionError: Tensor-likes are not close! skip('empty_strided', '', device_type='cpu'), } only_real_tensor_failures = { xfail('narrow'), } only_fake_tensor_failures = { xfail('narrow'), } fake_tensor_failures = { # ASAN failures due to divide by 0 skip('nn.functional.nll_loss'), } symbolic_tensor_failures = { xfail('combinations', ''), xfail('geqrf', ''), # aten.geqrf.default - couldn't find symbolic meta function/decomposition xfail('histogram', ''), # Could not run 'aten::histogram.bin_ct' with arguments from the 'Meta' backend. This c... xfail('histogramdd', ''), # aten._histogramdd_bin_edges.default - couldn't find symbolic meta function/decomposition xfail('nanquantile', ''), # Could not run 'aten::equal' with arguments from the 'Meta' backend. xfail('nn.functional.binary_cross_entropy', ''), # aten.new_empty.default - couldn't find symbolic meta function/decom... xfail('nn.functional.cross_entropy', ''), # aten.size.default - couldn't find symbolic meta function/decomposition xfail('nn.functional.ctc_loss'), # aten._ctc_loss.Tensor - couldn't find symbolic meta function/decomposition xfail('quantile', ''), # Could not run 'aten::equal' with arguments from the 'Meta' backend. xfail('unique_consecutive', ''), # aten.unique_consecutive.default - couldn't find symbolic meta function/decomposition xfail('max_pool2d_with_indices_backward', ''), # Expected a value of type 'List[int]' for argument 'kernel_size' but... # many complex operators incorrect striding, metadata xfail('fft.fft', ''), xfail('fft.hfft2', ''), xfail('fft.hfft', ''), xfail('fft.hfftn', ''), xfail('fft.ifft', ''), xfail('fft.ihfft2', ''), xfail('fft.ihfft', ''), xfail('fft.ihfftn', ''), xfail('fft.ihfft2', ''), xfail('fft.irfft2', ''), xfail('fft.irfft', ''), xfail('fft.irfftn', ''), xfail('fft.rfft2', ''), xfail('fft.rfft', ''), xfail('fft.rfftn', ''), xfail('stft', '') } symbolic_tensor_segfaults = { skip('nn.functional.batch_norm') # Segfault?? } inplace_symbolic_tensor_failures = { # bugs xfail('float_power', ''), # base given to float_power_ has dtype Float but the operation's result requires dtype Double } out_symbolic_tensor_failures = { # Cast error details: Unable to cast (...) to Tensor # # This happens because the test is set up to call the out variant using the `out` kwarg: # torch._some_op(arg1, arg2, out=(out1, out2, out3)) # # However, this only works on torch ops, not aten ops. For `_batch_norm_with_update`, # this fails because the op has no python bindings, so it doesn't support the `out` kwarg # way of calling its out variant. xfail('_batch_norm_with_update', ''), xfail('_native_batch_norm_legit', ''), xfail('angle', ''), xfail('argmax', ''), xfail('argmin', ''), xfail('fft.fft2', ''), xfail('fft.fftn', ''), xfail('fft.ifft2', ''), xfail('fft.ifftn', ''), xfail('gather', ''), xfail('linalg.pinv', ''), xfail('linalg.pinv', 'hermitian'), xfail('lu', ''), xfail('scatter_add', ''), xfail('scatter', ''), xfail('take_along_dim', ''), xfail('triangular_solve', ''), # SymIntArrayRef expected to contain only concrete xfail('ones', ''), xfail('randn', ''), xfail('zeros', ''), # RuntimeError: Cannot call numel() on tensor with symbolic sizes/strides xfail('index_reduce', 'prod'), xfail('index_reduce', 'mean'), xfail('index_reduce', 'amax'), xfail('index_reduce', 'amin'), } out_symbolic_tensor_segfaults = { skip('nanmean', ''), }
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/test_public_bindings.py
test_correct_module_names
def test_correct_module_names(self): ''' An API is considered public, if its `__module__` starts with `torch.` and there is no name in `__module__` or the object itself that starts with “_”. Each public package should either: - (preferred) Define `__all__` and all callables and classes in there must have their `__module__` start with the current submodule's path. Things not in `__all__` should NOT have their `__module__` start with the current submodule. - (for simple python-only modules) Not define `__all__` and all the elements in `dir(submod)` must have their `__module__` that start with the current submodule. ''' failure_list = [] with open(os.path.join(os.path.dirname(__file__), 'allowlist_for_publicAPI.json')) as json_file: # no new entries should be added to this allow_dict. # New APIs must follow the public API guidelines. allow_dict = json.load(json_file) # Because we want minimal modifications to the `allowlist_for_publicAPI.json`, # we are adding the entries for the migrated modules here from the original # locations. for modname in allow_dict["being_migrated"]: if modname in allow_dict: allow_dict[allow_dict["being_migrated"][modname]] = allow_dict[modname] def test_module(modname): split_strs = modname.split('.') mod = sys.modules.get(modname) for elem in split_strs: if elem.startswith("_"): return # verifies that each public API has the correct module name and naming semantics def check_one_element(elem, modname, mod, *, is_public, is_all): obj = getattr(mod, elem) if not (isinstance(obj, Callable) or inspect.isclass(obj)): return elem_module = getattr(obj, '__module__', None) # Only used for nice error message below why_not_looks_public = "" if elem_module is None: why_not_looks_public = "because it does not have a `__module__` attribute" # If a module is being migrated from foo.a to bar.a (that is entry {"foo": "bar"}), # the module's starting package would be referred to as the new location even # if there is a "from foo import a" inside the "bar.py". modname = allow_dict["being_migrated"].get(modname, modname) elem_modname_starts_with_mod = elem_module is not None and \ elem_module.startswith(modname) and \ '._' not in elem_module if not why_not_looks_public and not elem_modname_starts_with_mod: why_not_looks_public = f"because its `__module__` attribute (`{elem_module}`) is not within the " \ f"torch library or does not start with the submodule where it is defined (`{modname}`)" # elem's name must NOT begin with an `_` and it's module name # SHOULD start with it's current module since it's a public API looks_public = not elem.startswith('_') and elem_modname_starts_with_mod if not why_not_looks_public and not looks_public: why_not_looks_public = f"because it starts with `_` (`{elem}`)" if is_public != looks_public: if modname in NOT_IMPORTED_WHEN_TEST_WRITTEN: return if modname in allow_dict and elem in allow_dict[modname]: return if is_public: why_is_public = f"it is inside the module's (`{modname}`) `__all__`" if is_all else \ "it is an attribute that does not start with `_` on a module that " \ "does not have `__all__` defined" fix_is_public = f"remove it from the modules's (`{modname}`) `__all__`" if is_all else \ f"either define a `__all__` for `{modname}` or add a `_` at the beginning of the name" else: assert is_all why_is_public = f"it is not inside the module's (`{modname}`) `__all__`" fix_is_public = f"add it from the modules's (`{modname}`) `__all__`" if looks_public: why_looks_public = "it does look public because it follows the rules from the doc above " \ "(does not start with `_` and has a proper `__module__`)." fix_looks_public = "make its name start with `_`" else: why_looks_public = why_not_looks_public if not elem_modname_starts_with_mod: fix_looks_public = "make sure the `__module__` is properly set and points to a submodule "\ f"of `{modname}`" else: fix_looks_public = "remove the `_` at the beginning of the name" failure_list.append(f"# {modname}.{elem}:") is_public_str = "" if is_public else " NOT" failure_list.append(f" - Is{is_public_str} public: {why_is_public}") looks_public_str = "" if looks_public else " NOT" failure_list.append(f" - Does{looks_public_str} look public: {why_looks_public}") # Swap the str below to avoid having to create the NOT again failure_list.append(" - You can do either of these two things to fix this problem:") failure_list.append(f" - To make it{looks_public_str} public: {fix_is_public}") failure_list.append(f" - To make it{is_public_str} look public: {fix_looks_public}") if hasattr(mod, '__all__'): public_api = mod.__all__ all_api = dir(mod) for elem in all_api: check_one_element(elem, modname, mod, is_public=elem in public_api, is_all=True) else: all_api = dir(mod) for elem in all_api: if not elem.startswith('_'): check_one_element(elem, modname, mod, is_public=True, is_all=False) for _, modname, ispkg in pkgutil.walk_packages(path=torch.__path__, prefix=torch.__name__ + '.'): test_module(modname) test_module('torch') msg = "All the APIs below do not meet our guidelines for public API from " \ "https://github.com/pytorch/pytorch/wiki/Public-API-definition-and-documentation.\n" msg += "Make sure that everything that is public is expected (in particular that the module " \ "has a properly populated `__all__` attribute) and that everything that is supposed to be public " \ "does look public (it does not start with `_` and has a `__module__` that is properly populated)." msg += "\n\nFull list:\n" msg += "\n".join(map(str, failure_list)) # empty lists are considered false in python self.assertTrue(not failure_list, msg)
def test_correct_module_names(self): """ An API is considered public, if its `__module__` starts with `torch.` and there is no name in `__module__` or the object itself that starts with "_". Each public package should either: - (preferred) Define `__all__` and all callables and classes in there must have their `__module__` start with the current submodule's path. Things not in `__all__` should NOT have their `__module__` start with the current submodule. - (for simple python-only modules) Not define `__all__` and all the elements in `dir(submod)` must have their `__module__` that start with the current submodule. """ failure_list = [] with open( get_file_path_2(os.path.dirname(__file__), "allowlist_for_publicAPI.json") ) as json_file: # no new entries should be added to this allow_dict. # New APIs must follow the public API guidelines. allow_dict = json.load(json_file) # Because we want minimal modifications to the `allowlist_for_publicAPI.json`, # we are adding the entries for the migrated modules here from the original # locations. for modname in allow_dict["being_migrated"]: if modname in allow_dict: allow_dict[allow_dict["being_migrated"][modname]] = allow_dict[ modname ] def test_module(modname): try: if "__main__" in modname: return mod = importlib.import_module(modname) except Exception: # It is ok to ignore here as we have a test above that ensures # this should never happen return if not self._is_mod_public(modname): return # verifies that each public API has the correct module name and naming semantics def check_one_element(elem, modname, mod, *, is_public, is_all): obj = getattr(mod, elem) # torch.dtype is not a class nor callable, so we need to check for it separately if not ( isinstance(obj, (Callable, torch.dtype)) or inspect.isclass(obj) ): return elem_module = getattr(obj, "__module__", None) # Only used for nice error message below why_not_looks_public = "" if elem_module is None: why_not_looks_public = ( "because it does not have a `__module__` attribute" ) # If a module is being migrated from foo.a to bar.a (that is entry {"foo": "bar"}), # the module's starting package would be referred to as the new location even # if there is a "from foo import a" inside the "bar.py". modname = allow_dict["being_migrated"].get(modname, modname) elem_modname_starts_with_mod = ( elem_module is not None and elem_module.startswith(modname) and "._" not in elem_module ) if not why_not_looks_public and not elem_modname_starts_with_mod: why_not_looks_public = ( f"because its `__module__` attribute (`{elem_module}`) is not within the " f"torch library or does not start with the submodule where it is defined (`{modname}`)" ) # elem's name must NOT begin with an `_` and it's module name # SHOULD start with it's current module since it's a public API looks_public = not elem.startswith("_") and elem_modname_starts_with_mod if not why_not_looks_public and not looks_public: why_not_looks_public = f"because it starts with `_` (`{elem}`)" if is_public != looks_public: if modname in allow_dict and elem in allow_dict[modname]: return if is_public: why_is_public = ( f"it is inside the module's (`{modname}`) `__all__`" if is_all else "it is an attribute that does not start with `_` on a module that " "does not have `__all__` defined" ) fix_is_public = ( f"remove it from the modules's (`{modname}`) `__all__`" if is_all else f"either define a `__all__` for `{modname}` or add a `_` at the beginning of the name" ) else: assert is_all why_is_public = ( f"it is not inside the module's (`{modname}`) `__all__`" ) fix_is_public = ( f"add it from the modules's (`{modname}`) `__all__`" ) if looks_public: why_looks_public = ( "it does look public because it follows the rules from the doc above " "(does not start with `_` and has a proper `__module__`)." ) fix_looks_public = "make its name start with `_`" else: why_looks_public = why_not_looks_public if not elem_modname_starts_with_mod: fix_looks_public = ( "make sure the `__module__` is properly set and points to a submodule " f"of `{modname}`" ) else: fix_looks_public = ( "remove the `_` at the beginning of the name" ) failure_list.append(f"# {modname}.{elem}:") is_public_str = "" if is_public else " NOT" failure_list.append( f" - Is{is_public_str} public: {why_is_public}" ) looks_public_str = "" if looks_public else " NOT" failure_list.append( f" - Does{looks_public_str} look public: {why_looks_public}" ) # Swap the str below to avoid having to create the NOT again failure_list.append( " - You can do either of these two things to fix this problem:" ) failure_list.append( f" - To make it{looks_public_str} public: {fix_is_public}" ) failure_list.append( f" - To make it{is_public_str} look public: {fix_looks_public}" ) if hasattr(mod, "__all__"): public_api = mod.__all__ all_api = dir(mod) for elem in all_api: check_one_element( elem, modname, mod, is_public=elem in public_api, is_all=True ) else: all_api = dir(mod) for elem in all_api: if not elem.startswith("_"): check_one_element( elem, modname, mod, is_public=True, is_all=False ) for mod in pkgutil.walk_packages(torch.__path__, "torch."): modname = mod.name test_module(modname) test_module("torch") msg = ( "All the APIs below do not meet our guidelines for public API from " "https://github.com/pytorch/pytorch/wiki/Public-API-definition-and-documentation.\n" ) msg += ( "Make sure that everything that is public is expected (in particular that the module " "has a properly populated `__all__` attribute) and that everything that is supposed to be public " "does look public (it does not start with `_` and has a `__module__` that is properly populated)." ) msg += "\n\nFull list:\n" msg += "\n".join(map(str, failure_list)) # empty lists are considered false in python self.assertTrue(not failure_list, msg)
from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS import pkgutil import torch import sys from typing import Callable import inspect import json import os import unittest NOT_IMPORTED_WHEN_TEST_WRITTEN = { "torch.fx.experimental.normalize", "torch.fx.experimental.proxy_tensor", "torch.fx.experimental.schema_type_annotation", "torch.fx.experimental.symbolic_shapes", "torch.fx.passes.backends.cudagraphs", "torch.fx.passes.infra.partitioner", "torch.fx.passes.utils.fuser_utils", } class TestPublicBindings(TestCase):
import importlib import inspect import json import logging import os import pkgutil import unittest from typing import Callable import torch from torch._utils_internal import get_file_path_2 from torch.testing._internal.common_utils import ( IS_JETSON, IS_MACOS, IS_WINDOWS, run_tests, skipIfTorchDynamo, TestCase, ) log = logging.getLogger(__name__) class TestPublicBindings(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_public_bindings.py
test_module
def test_module(modname): split_strs = modname.split('.') mod = sys.modules.get(modname) for elem in split_strs: if elem.startswith("_"): return # verifies that each public API has the correct module name and naming semantics def check_one_element(elem, modname, mod, *, is_public, is_all): obj = getattr(mod, elem) if not (isinstance(obj, Callable) or inspect.isclass(obj)): return elem_module = getattr(obj, '__module__', None) # Only used for nice error message below why_not_looks_public = "" if elem_module is None: why_not_looks_public = "because it does not have a `__module__` attribute" # If a module is being migrated from foo.a to bar.a (that is entry {"foo": "bar"}), # the module's starting package would be referred to as the new location even # if there is a "from foo import a" inside the "bar.py". modname = allow_dict["being_migrated"].get(modname, modname) elem_modname_starts_with_mod = elem_module is not None and \ elem_module.startswith(modname) and \ '._' not in elem_module if not why_not_looks_public and not elem_modname_starts_with_mod: why_not_looks_public = f"because its `__module__` attribute (`{elem_module}`) is not within the " \ f"torch library or does not start with the submodule where it is defined (`{modname}`)" # elem's name must NOT begin with an `_` and it's module name # SHOULD start with it's current module since it's a public API looks_public = not elem.startswith('_') and elem_modname_starts_with_mod if not why_not_looks_public and not looks_public: why_not_looks_public = f"because it starts with `_` (`{elem}`)" if is_public != looks_public: if modname in NOT_IMPORTED_WHEN_TEST_WRITTEN: return if modname in allow_dict and elem in allow_dict[modname]: return if is_public: why_is_public = f"it is inside the module's (`{modname}`) `__all__`" if is_all else \ "it is an attribute that does not start with `_` on a module that " \ "does not have `__all__` defined" fix_is_public = f"remove it from the modules's (`{modname}`) `__all__`" if is_all else \ f"either define a `__all__` for `{modname}` or add a `_` at the beginning of the name" else: assert is_all why_is_public = f"it is not inside the module's (`{modname}`) `__all__`" fix_is_public = f"add it from the modules's (`{modname}`) `__all__`" if looks_public: why_looks_public = "it does look public because it follows the rules from the doc above " \ "(does not start with `_` and has a proper `__module__`)." fix_looks_public = "make its name start with `_`" else: why_looks_public = why_not_looks_public if not elem_modname_starts_with_mod: fix_looks_public = "make sure the `__module__` is properly set and points to a submodule "\ f"of `{modname}`" else: fix_looks_public = "remove the `_` at the beginning of the name" failure_list.append(f"# {modname}.{elem}:") is_public_str = "" if is_public else " NOT" failure_list.append(f" - Is{is_public_str} public: {why_is_public}") looks_public_str = "" if looks_public else " NOT" failure_list.append(f" - Does{looks_public_str} look public: {why_looks_public}") # Swap the str below to avoid having to create the NOT again failure_list.append(" - You can do either of these two things to fix this problem:") failure_list.append(f" - To make it{looks_public_str} public: {fix_is_public}") failure_list.append(f" - To make it{is_public_str} look public: {fix_looks_public}") if hasattr(mod, '__all__'): public_api = mod.__all__ all_api = dir(mod) for elem in all_api: check_one_element(elem, modname, mod, is_public=elem in public_api, is_all=True) else: all_api = dir(mod) for elem in all_api: if not elem.startswith('_'): check_one_element(elem, modname, mod, is_public=True, is_all=False) for _, modname, ispkg in pkgutil.walk_packages(path=torch.__path__, prefix=torch.__name__ + '.'): test_module(modname) test_module('torch') msg = "All the APIs below do not meet our guidelines for public API from " \ "https://github.com/pytorch/pytorch/wiki/Public-API-definition-and-documentation.\n" msg += "Make sure that everything that is public is expected (in particular that the module " \ "has a properly populated `__all__` attribute) and that everything that is supposed to be public " \ "does look public (it does not start with `_` and has a `__module__` that is properly populated)." msg += "\n\nFull list:\n" msg += "\n".join(map(str, failure_list)) # empty lists are considered false in python self.assertTrue(not failure_list, msg)
def test_module(modname): try: if "__main__" in modname: return mod = importlib.import_module(modname) except Exception: # It is ok to ignore here as we have a test above that ensures # this should never happen return if not self._is_mod_public(modname): return # verifies that each public API has the correct module name and naming semantics def check_one_element(elem, modname, mod, *, is_public, is_all): obj = getattr(mod, elem) # torch.dtype is not a class nor callable, so we need to check for it separately if not ( isinstance(obj, (Callable, torch.dtype)) or inspect.isclass(obj) ): return elem_module = getattr(obj, "__module__", None) # Only used for nice error message below why_not_looks_public = "" if elem_module is None: why_not_looks_public = ( "because it does not have a `__module__` attribute" ) # If a module is being migrated from foo.a to bar.a (that is entry {"foo": "bar"}), # the module's starting package would be referred to as the new location even # if there is a "from foo import a" inside the "bar.py". modname = allow_dict["being_migrated"].get(modname, modname) elem_modname_starts_with_mod = ( elem_module is not None and elem_module.startswith(modname) and "._" not in elem_module ) if not why_not_looks_public and not elem_modname_starts_with_mod: why_not_looks_public = ( f"because its `__module__` attribute (`{elem_module}`) is not within the " f"torch library or does not start with the submodule where it is defined (`{modname}`)" ) # elem's name must NOT begin with an `_` and it's module name # SHOULD start with it's current module since it's a public API looks_public = not elem.startswith("_") and elem_modname_starts_with_mod if not why_not_looks_public and not looks_public: why_not_looks_public = f"because it starts with `_` (`{elem}`)" if is_public != looks_public: if modname in allow_dict and elem in allow_dict[modname]: return if is_public: why_is_public = ( f"it is inside the module's (`{modname}`) `__all__`" if is_all else "it is an attribute that does not start with `_` on a module that " "does not have `__all__` defined" ) fix_is_public = ( f"remove it from the modules's (`{modname}`) `__all__`" if is_all else f"either define a `__all__` for `{modname}` or add a `_` at the beginning of the name" ) else: assert is_all why_is_public = ( f"it is not inside the module's (`{modname}`) `__all__`" ) fix_is_public = ( f"add it from the modules's (`{modname}`) `__all__`" ) if looks_public: why_looks_public = ( "it does look public because it follows the rules from the doc above " "(does not start with `_` and has a proper `__module__`)." ) fix_looks_public = "make its name start with `_`" else: why_looks_public = why_not_looks_public if not elem_modname_starts_with_mod: fix_looks_public = ( "make sure the `__module__` is properly set and points to a submodule " f"of `{modname}`" ) else: fix_looks_public = ( "remove the `_` at the beginning of the name" ) failure_list.append(f"# {modname}.{elem}:") is_public_str = "" if is_public else " NOT" failure_list.append( f" - Is{is_public_str} public: {why_is_public}" ) looks_public_str = "" if looks_public else " NOT" failure_list.append( f" - Does{looks_public_str} look public: {why_looks_public}" ) # Swap the str below to avoid having to create the NOT again failure_list.append( " - You can do either of these two things to fix this problem:" ) failure_list.append( f" - To make it{looks_public_str} public: {fix_is_public}" ) failure_list.append( f" - To make it{is_public_str} look public: {fix_looks_public}" ) if hasattr(mod, "__all__"): public_api = mod.__all__ all_api = dir(mod) for elem in all_api: check_one_element( elem, modname, mod, is_public=elem in public_api, is_all=True ) else: all_api = dir(mod) for elem in all_api: if not elem.startswith("_"): check_one_element( elem, modname, mod, is_public=True, is_all=False ) for mod in pkgutil.walk_packages(torch.__path__, "torch."): modname = mod.name test_module(modname) test_module("torch") msg = ( "All the APIs below do not meet our guidelines for public API from " "https://github.com/pytorch/pytorch/wiki/Public-API-definition-and-documentation.\n" ) msg += ( "Make sure that everything that is public is expected (in particular that the module " "has a properly populated `__all__` attribute) and that everything that is supposed to be public " "does look public (it does not start with `_` and has a `__module__` that is properly populated)." ) msg += "\n\nFull list:\n" msg += "\n".join(map(str, failure_list)) # empty lists are considered false in python self.assertTrue(not failure_list, msg)
from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS import pkgutil import torch import sys from typing import Callable import inspect import json import os import unittest NOT_IMPORTED_WHEN_TEST_WRITTEN = { "torch.fx.experimental.normalize", "torch.fx.experimental.proxy_tensor", "torch.fx.experimental.schema_type_annotation", "torch.fx.experimental.symbolic_shapes", "torch.fx.passes.backends.cudagraphs", "torch.fx.passes.infra.partitioner", "torch.fx.passes.utils.fuser_utils", }
import importlib import inspect import json import logging import os import pkgutil import unittest from typing import Callable import torch from torch._utils_internal import get_file_path_2 from torch.testing._internal.common_utils import ( IS_JETSON, IS_MACOS, IS_WINDOWS, run_tests, skipIfTorchDynamo, TestCase, ) log = logging.getLogger(__name__)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/test_public_bindings.py
check_one_element
def check_one_element(elem, modname, mod, *, is_public, is_all): obj = getattr(mod, elem) if not (isinstance(obj, Callable) or inspect.isclass(obj)): return elem_module = getattr(obj, '__module__', None) # Only used for nice error message below why_not_looks_public = "" if elem_module is None: why_not_looks_public = "because it does not have a `__module__` attribute" # If a module is being migrated from foo.a to bar.a (that is entry {"foo": "bar"}), # the module's starting package would be referred to as the new location even # if there is a "from foo import a" inside the "bar.py". modname = allow_dict["being_migrated"].get(modname, modname) elem_modname_starts_with_mod = elem_module is not None and \ elem_module.startswith(modname) and \ '._' not in elem_module if not why_not_looks_public and not elem_modname_starts_with_mod: why_not_looks_public = f"because its `__module__` attribute (`{elem_module}`) is not within the " \ f"torch library or does not start with the submodule where it is defined (`{modname}`)" # elem's name must NOT begin with an `_` and it's module name # SHOULD start with it's current module since it's a public API looks_public = not elem.startswith('_') and elem_modname_starts_with_mod if not why_not_looks_public and not looks_public: why_not_looks_public = f"because it starts with `_` (`{elem}`)" if is_public != looks_public: if modname in NOT_IMPORTED_WHEN_TEST_WRITTEN: return if modname in allow_dict and elem in allow_dict[modname]: return if is_public: why_is_public = f"it is inside the module's (`{modname}`) `__all__`" if is_all else \ "it is an attribute that does not start with `_` on a module that " \ "does not have `__all__` defined" fix_is_public = f"remove it from the modules's (`{modname}`) `__all__`" if is_all else \ f"either define a `__all__` for `{modname}` or add a `_` at the beginning of the name" else: assert is_all why_is_public = f"it is not inside the module's (`{modname}`) `__all__`" fix_is_public = f"add it from the modules's (`{modname}`) `__all__`" if looks_public: why_looks_public = "it does look public because it follows the rules from the doc above " \ "(does not start with `_` and has a proper `__module__`)." fix_looks_public = "make its name start with `_`" else: why_looks_public = why_not_looks_public if not elem_modname_starts_with_mod: fix_looks_public = "make sure the `__module__` is properly set and points to a submodule "\ f"of `{modname}`" else: fix_looks_public = "remove the `_` at the beginning of the name" failure_list.append(f"# {modname}.{elem}:") is_public_str = "" if is_public else " NOT" failure_list.append(f" - Is{is_public_str} public: {why_is_public}") looks_public_str = "" if looks_public else " NOT" failure_list.append(f" - Does{looks_public_str} look public: {why_looks_public}") # Swap the str below to avoid having to create the NOT again failure_list.append(" - You can do either of these two things to fix this problem:") failure_list.append(f" - To make it{looks_public_str} public: {fix_is_public}") failure_list.append(f" - To make it{is_public_str} look public: {fix_looks_public}") if hasattr(mod, '__all__'): public_api = mod.__all__ all_api = dir(mod) for elem in all_api: check_one_element(elem, modname, mod, is_public=elem in public_api, is_all=True) else: all_api = dir(mod) for elem in all_api: if not elem.startswith('_'): check_one_element(elem, modname, mod, is_public=True, is_all=False)
def check_one_element(elem, modname, mod, *, is_public, is_all): obj = getattr(mod, elem) # torch.dtype is not a class nor callable, so we need to check for it separately if not ( isinstance(obj, (Callable, torch.dtype)) or inspect.isclass(obj) ): return elem_module = getattr(obj, "__module__", None) # Only used for nice error message below why_not_looks_public = "" if elem_module is None: why_not_looks_public = ( "because it does not have a `__module__` attribute" ) # If a module is being migrated from foo.a to bar.a (that is entry {"foo": "bar"}), # the module's starting package would be referred to as the new location even # if there is a "from foo import a" inside the "bar.py". modname = allow_dict["being_migrated"].get(modname, modname) elem_modname_starts_with_mod = ( elem_module is not None and elem_module.startswith(modname) and "._" not in elem_module ) if not why_not_looks_public and not elem_modname_starts_with_mod: why_not_looks_public = ( f"because its `__module__` attribute (`{elem_module}`) is not within the " f"torch library or does not start with the submodule where it is defined (`{modname}`)" ) # elem's name must NOT begin with an `_` and it's module name # SHOULD start with it's current module since it's a public API looks_public = not elem.startswith("_") and elem_modname_starts_with_mod if not why_not_looks_public and not looks_public: why_not_looks_public = f"because it starts with `_` (`{elem}`)" if is_public != looks_public: if modname in allow_dict and elem in allow_dict[modname]: return if is_public: why_is_public = ( f"it is inside the module's (`{modname}`) `__all__`" if is_all else "it is an attribute that does not start with `_` on a module that " "does not have `__all__` defined" ) fix_is_public = ( f"remove it from the modules's (`{modname}`) `__all__`" if is_all else f"either define a `__all__` for `{modname}` or add a `_` at the beginning of the name" ) else: assert is_all why_is_public = ( f"it is not inside the module's (`{modname}`) `__all__`" ) fix_is_public = ( f"add it from the modules's (`{modname}`) `__all__`" ) if looks_public: why_looks_public = ( "it does look public because it follows the rules from the doc above " "(does not start with `_` and has a proper `__module__`)." ) fix_looks_public = "make its name start with `_`" else: why_looks_public = why_not_looks_public if not elem_modname_starts_with_mod: fix_looks_public = ( "make sure the `__module__` is properly set and points to a submodule " f"of `{modname}`" ) else: fix_looks_public = ( "remove the `_` at the beginning of the name" ) failure_list.append(f"# {modname}.{elem}:") is_public_str = "" if is_public else " NOT" failure_list.append( f" - Is{is_public_str} public: {why_is_public}" ) looks_public_str = "" if looks_public else " NOT" failure_list.append( f" - Does{looks_public_str} look public: {why_looks_public}" ) # Swap the str below to avoid having to create the NOT again failure_list.append( " - You can do either of these two things to fix this problem:" ) failure_list.append( f" - To make it{looks_public_str} public: {fix_is_public}" ) failure_list.append( f" - To make it{is_public_str} look public: {fix_looks_public}" ) if hasattr(mod, "__all__"): public_api = mod.__all__ all_api = dir(mod) for elem in all_api: check_one_element( elem, modname, mod, is_public=elem in public_api, is_all=True ) else: all_api = dir(mod) for elem in all_api: if not elem.startswith("_"): check_one_element( elem, modname, mod, is_public=True, is_all=False )
from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS import pkgutil import torch import sys from typing import Callable import inspect import json import os import unittest NOT_IMPORTED_WHEN_TEST_WRITTEN = { "torch.fx.experimental.normalize", "torch.fx.experimental.proxy_tensor", "torch.fx.experimental.schema_type_annotation", "torch.fx.experimental.symbolic_shapes", "torch.fx.passes.backends.cudagraphs", "torch.fx.passes.infra.partitioner", "torch.fx.passes.utils.fuser_utils", }
import importlib import inspect import json import logging import os import pkgutil import unittest from typing import Callable import torch from torch._utils_internal import get_file_path_2 from torch.testing._internal.common_utils import ( IS_JETSON, IS_MACOS, IS_WINDOWS, run_tests, skipIfTorchDynamo, TestCase, ) log = logging.getLogger(__name__)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified